|
Class 11: 675 fragments, nominal size 14 lines, similarity 70%
GHData/rasbt_ord-torchhub/hubconf.py: 34-50
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.output_layer(x)
return logits
|
GHData/JimpeiYamamoto_myTorch/myResNet50.py: 70-85
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/chaozhong2010_SENet-PyTorch/resnext.py: 67-89
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/Shimwonhyung_torch_mnist/train.py: 42-62
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.drop2(x)
x = self.pool2(x)
x = self.relu2(x)
x = x.view(-1, 320)
x = self.fc1(x)
x = self.relu1_fc1(x)
x = self.drop1_fc1(x)
x = self.fc2(x)
return x
## 네트워크를 저장하거나 불러오는 함수 작성하기
|
GHData/TahsinPavel_Handwritten-Digit-Recognition-with-PyTorch/CNN.py: 135-154
def forward(self, x):
out = self.c1(x) # [BATCH_SIZE, 16, 24, 24]
out = self.relu1(out)
out = self.maxpool1(out) # [BATCH_SIZE, 16, 12, 12]
out = self.dropout1(out)
out = self.c2(out) # [BATCH_SIZE, 32, 10, 10]
out = self.relu2(out)
out = self.maxpool2(out) # [BATCH_SIZE, 32, 5, 5]
out = self.dropout2(out)
out = out.view(out.size(0), -1) # [BATCH_SIZE, 32*5*5=800]
out = self.fc1(out) # [BATCH_SIZE, 256]
out = self.dropout3(out)
out = self.fc2(out) # [BATCH_SIZE, 10]
return out
# Create CNN
|
GHData/huzi96_Coarse2Fine-PyTorch/networks_trained.py: 289-306
def forward(self, pf, h2, h1):
h1prime = self.d2s(h1)
h = torch.cat([h2, h1prime], 1)
h = self.layer_1(h)
h = self.layer_1a(h)
h = self.layer_1b(h)
hfeat_0 = torch.cat([pf, h], 1)
hfeat = self.layer_3_1(hfeat_0)
hfeat = self.layer_3_2(hfeat)
hfeat = self.layer_3_3(hfeat)
hfeat = hfeat_0 + hfeat
x = self.layer_4(hfeat)
x = self.layer_5(x)
x = self.layer_6(x)
return x
|
GHData/Chaiyanchong_CutMix-PyTorch-master/resnet.py: 62-82
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/Hsankesara_VoxelMorph-PyTorch/voxelmorph3d.py: 92-110
def forward(self, x):
# Encode
encode_block1 = self.conv_encode1(x)
encode_pool1 = self.conv_maxpool1(encode_block1)
encode_block2 = self.conv_encode2(encode_pool1)
encode_pool2 = self.conv_maxpool2(encode_block2)
encode_block3 = self.conv_encode3(encode_pool2)
encode_pool3 = self.conv_maxpool3(encode_block3)
# Bottleneck
bottleneck1 = self.bottleneck(encode_pool3)
# Decode
decode_block3 = self.crop_and_concat(bottleneck1, encode_block3)
cat_layer2 = self.conv_decode3(decode_block3)
decode_block2 = self.crop_and_concat(cat_layer2, encode_block2)
cat_layer1 = self.conv_decode2(decode_block2)
decode_block1 = self.crop_and_concat(cat_layer1, encode_block1)
final_layer = self.final_layer(decode_block1)
return final_layer
|
GHData/muratonuryildirim_PyTorch_Notes/15_ResNet_from_scratch.py: 48-67
def forward(self, x):
residual = x.clone()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
if self.identity_downsample is not None:
residual = self.identity_downsample(residual)
x += residual
x = self.relu(x)
return x
|
GHData/t-vi_lit_torchdrift/model.py: 22-37
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/TalentBoy2333_RetinaNet-PyTorch-Tutorial/resnet.py: 89-111
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/acholston_PyTorch_Exercises/Ex11-1b.py: 130-148
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, padding=1, stride=1)
branch_pool = self.branch_pool(branch_pool)
outputs = torch.cat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 1)
return outputs
|
GHData/dyhan0920_PyramidNet-PyTorch/resnet.py: 63-83
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/ChawDoe_LeNet5-MNIST-PyTorch/model.py: 21-35
def forward(self, x):
y = self.conv1(x)
y = self.relu1(y)
y = self.pool1(y)
y = self.conv2(y)
y = self.relu2(y)
y = self.pool2(y)
y = y.view(y.shape[0], -1)
y = self.fc1(y)
y = self.relu3(y)
y = self.fc2(y)
y = self.relu4(y)
y = self.fc3(y)
y = self.relu5(y)
return y
|
GHData/zhenshen-mla_CIFAR10-in-PyTorch/models.py: 349-384
def forward(self, x):
# 如果从宏观上看,googlenet也是一个通过Inception模块不断加深的结构,但Inception相比于其他结构有一定宽度。
out = self.pre_layers(x)
# [32, 192, 32, 32]
out = self.layer1(out)
# [32, 256, 32, 32]
out = self.layer2(out)
# [32, 480, 32, 32]
out = self.maxpool(out) # maxpool对H×W减半
# [32, 480, 16, 16]
out = self.layer3(out)
# [32, 512, 16, 16]
out = self.layer4(out)
# [32, 512, 16, 16]
out = self.layer5(out)
# [32, 512, 16, 16]
out = self.layer6(out)
# [32, 528, 16, 16]
out = self.layer7(out)
# [32, 832, 16, 16]
out = self.maxpool(out) # maxpool对H×W减半
# [32, 832, 8, 8]
out = self.layer8(out)
# [32, 832, 8, 8]
out = self.layer9(out)
# [32, 1024, 8, 8]
out = self.avgpool(out)
# [32, 1024, 1, 1]
out = out.view(out.size(0), -1)
# [32, 1024]
out = self.linear(out)
return out
# 14年的googlenet和vgg还是比较广泛的使用maxpool进行特征图缩减的,在以后的文章中,基本上都会避免pool的使用,信息丢失有些严重
|
GHData/Windxy_Classic_Network_PyTorch/ResNet34.py: 130-149
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.include_top:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
# 封装,加载预训练参数
|
GHData/zonghaofan_DRML_torch/network_fer2013_96_96.py: 94-126
def forward(self, x):
"""
:param x: (b, c, h, w)
:return: (b, class_number)
"""
batch_size = x.size(0)
x = self.extractor1(x)
# print(x.shape)
x = self.extractor2(x)
# print(x.shape)
x = self.extractor3(x)
short_cut = x
x = self.bottleneck(x)
x = self.relu(x + short_cut)
x=self.conv(x)
x=self.pool(x)
# print(x.shape)
x = self.avgpool(x)
# print(x.shape)
x = x.view(batch_size, -1)
# print(x.shape)
output=self.classifier(x)
return output
|
GHData/Hsankesara_VoxelMorph-PyTorch/voxelmorph2d.py: 95-113
def forward(self, x):
# Encode
encode_block1 = self.conv_encode1(x)
encode_pool1 = self.conv_maxpool1(encode_block1)
encode_block2 = self.conv_encode2(encode_pool1)
encode_pool2 = self.conv_maxpool2(encode_block2)
encode_block3 = self.conv_encode3(encode_pool2)
encode_pool3 = self.conv_maxpool3(encode_block3)
# Bottleneck
bottleneck1 = self.bottleneck(encode_pool3)
# Decode
decode_block3 = self.crop_and_concat(bottleneck1, encode_block3)
cat_layer2 = self.conv_decode3(decode_block3)
decode_block2 = self.crop_and_concat(cat_layer2, encode_block2)
cat_layer1 = self.conv_decode2(decode_block2)
decode_block1 = self.crop_and_concat(cat_layer1, encode_block1)
final_layer = self.final_layer(decode_block1)
return final_layer
|
GHData/SethurajS_CNN_Architectures_in_PyTorch/Resnet.py: 27-45
def forward(self, x):
identity = x.clone()
x = self.conv1(x)
x = self.batchnorm1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.batchnorm2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.batchnorm3(x)
if self.downsample is not None:
identity = self.downsample(identity)
x += identity
x = self.relu(x)
return x
# Resnet Block
|
GHData/acholston_PyTorch_Exercises/Ex11-1a.py: 120-141
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, padding=1, stride=1)
branch_pool = self.branch_pool(branch_pool)
#concat
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
#Fourth Inception module
|
GHData/ISwordLion_ResNets_PyTorch_IR_Pedestrian_Images/drn_anlatim.py: 114-131
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.drop(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.drop(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/huzi96_Coarse2Fine-PyTorch/networks.py: 284-301
def forward(self, pf, h2, h1):
h1prime = self.d2s(h1)
h = torch.cat([h2, h1prime], 1)
h = self.layer_1(h)
h = self.layer_1a(h)
h = self.layer_1b(h)
hfeat_0 = torch.cat([pf, h], 1)
hfeat = self.layer_3_1(hfeat_0)
hfeat = self.layer_3_2(hfeat)
hfeat = self.layer_3_3(hfeat)
hfeat = hfeat_0 + hfeat
x = self.layer_4(hfeat)
x = self.layer_5(x)
x = self.layer_6(x)
return x
|
GHData/foamliu_InsightFace-PyTorch/models.py: 77-99
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/Xingxiangrui_various_pyTorch_network_structure/group_clsgat_parallel.py: 103-139
def forward(self, x):
# input [ batch, group_channels=512, W=14,H=14]
residual = x
# squeeze out [ batch, group_channels/expand= 256, W, H ]
out = self.conv1(x)
# BN (planes: group_channels//expand= 256), out [ batch, group_channels/expand= 256, W, H ]
out = self.bn1(out)
# out same as above [ batch, group_channels/expand= 256, W,H]
out = self.relu(out)
# same [ batch, group_channels/expand= 256, W,H]
out = self.conv2(out)
# same [ batch, group_channels/expand= 256, W,H]
out = self.bn2(out)
# same [ batch, group_channels/expand= 256, W,H]
out = self.relu(out)
# expand out [ batch, group_channels=512 , W,H]
out = self.conv3(out)
# same as above [ batch, group_channels=512 , W,H]
out = self.bn3(out)
# out = self.ca(out) * out
# out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
# same as input [ batch, group_channels=512 , W,H]
out += residual
# same as input [ batch, group_channels=512 , W,H]
out = self.relu(out)
return out
# fixme , torch parallel bottlenect structure
|
GHData/NehaJSarnaik_Deep-Voice-Conversion-using-DNN/VC_model.py: 193-210
def forward(self, input):
# GLU
conv1 = self.conv1(input) * torch.sigmoid(self.conv1_gates(input))
downsample1 = self.downSample1(conv1)
downsample2 = self.downSample2(downsample1)
residual_layer_1 = self.residualLayer1(downsample2)
residual_layer_2 = self.residualLayer2(residual_layer_1)
residual_layer_3 = self.residualLayer3(residual_layer_2)
residual_layer_4 = self.residualLayer4(residual_layer_3)
residual_layer_5 = self.residualLayer5(residual_layer_4)
residual_layer_6 = self.residualLayer6(residual_layer_5)
upSample_layer_1 = self.upSample1(residual_layer_6)
upSample_layer_2 = self.upSample2(upSample_layer_1)
output = self.lastConvLayer(upSample_layer_2)
return output
|
GHData/leichenNUSJ_AAMandDCM/networks_adaCBMA_deform.py: 181-207
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
##################################################
|
GHData/Shimwonhyung_torch_mnist/eval.py: 42-62
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.drop2(x)
x = self.pool2(x)
x = self.relu2(x)
x = x.view(-1, 320)
x = self.fc1(x)
x = self.relu1_fc1(x)
x = self.drop1_fc1(x)
x = self.fc2(x)
return x
## 네트워크를 저장하거나 불러오는 함수 작성하기
|
GHData/chriselrod_LeNetTorch/train.py: 35-49
def forward(self, x):
x = self.conv0(x)
x = self.r0(x)
x = self.mp0(x)
x = self.conv1(x)
x = self.r1(x)
x = self.mp1(x)
x = self.d0(x.view(x.shape[0], -1))
x = self.r2(x)
x = self.d1(x)
x = self.r3(x)
x = self.d2(x)
x = self.out(x)
return x
|
GHData/SatoKeiju_TWICE-MetricLearning-PyTorch/models.py: 22-42
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.bn1(x)
x = self.maxpool(x)
x = F.relu(self.conv2(x))
x = self.bn2(x)
x = self.maxpool(x)
x = F.relu(self.conv3(x))
x = self.bn3(x)
x = self.maxpool(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.avgpool(x)
# x = self.dropout(x)
# x = x.view(-1, 128*5*5)
# x = F.relu(self.fc(x))
# x = self.classifier(x)
return x
|
GHData/zihaog0724_Fully-Convolutional-Network-PyTorch/fcn_model.py: 43-69
def forward(self, x):
x = self.stage1(x)
s1 = x # bs, 128, 63, 63
s1 = self.pad(s1) # bs, 128, 64, 64
x = self.stage2(x)
s2 = x # bs, 256, 32, 32
x = self.stage3(x)
s3 = x # bs, 512, 16, 16
s3 = self.scores1(s3) # bs, 2, 16, 16
s3 = self.upsample_2x(s3) # bs, 2, 32, 32
s2 = self.scores2(s2) # bs, 2, 32, 32
s2 = s2 + s3 # bs, 2, 32, 32
s1 = self.scores3(s1) # bs, 2, 64, 64
s2 = self.upsample_4x(s2) # bs, 2, 64, 64
s = s1 + s2 # bs, 2, 64, 64
s = self.upsample_8x(s) # bs, 2, 512, 512
return s
|
GHData/miraclewkf_ResNeXt-PyTorch/resnext.py: 132-149
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/kose_PyTorch_MNIST_LSTM/mnist.py: 67-82
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/ShaoXiang23_Hourglass_PyTorch/model.py: 20-36
def forward(self, x):
residual = x
out = self.bn(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if self.numIn != self.numOut:
residual = self.conv4(x)
return out + residual
|
GHData/yaoyi30_ResNet_Image_Classification_PyTorch/model.py: 140-158
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.include_top:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/pirunita_SiamMask_OPN_PyTorch/resnet.py: 80-106
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
if out.size() != residual.size():
print(out.size(), residual.size())
out += residual
out = self.relu(out)
return out
|
GHData/pirunita_SiamMask_OPN_PyTorch/resnet.py: 123-148
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
s = residual.size(3)
residual = residual[:, :, 1:s-1, 1:s-1]
out += residual
out = self.relu(out)
return out
|
GHData/UncleThree0402_PyTorch_FFN_HeartDisease/train.py: 94-111
def forward(self, x):
x = self.input(x)
x = nn.LeakyReLU()(x)
x = self.d1(x)
x = self.ll1(x)
x = nn.LeakyReLU()(x)
x = self.d2(x)
x = self.ll2(x)
x = nn.LeakyReLU()(x)
x = self.d3(x)
x = self.ll3(x)
x = nn.LeakyReLU()(x)
x = self.output(x)
return x
# Loss_fn optimizer lr_scheduler
|
GHData/ilex-paraguariensis_torch_modules/resnet3d.py: 201-223
def forward(self, x):
x = x.permute(0, 2, 3, 4, 1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
if not self.no_max_pool:
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = nn.Sigmoid()(self.fc(x))
return x.squeeze()
|
GHData/matthewfeickert_nvidia-gpu-ml-library-test/torch_MNIST.py: 22-37
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/RayTeen_ACGAN.PyTorch/models.py: 27-48
def forward(self, input):
x = input.view(input.size(0), -1, 1, 1)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.conv5(x)
output = self.tanh(x)
return output
|
GHData/yaoceyi_CenterNet-PyTorch/model.py: 175-193
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.deconv_layers(x)
hm = self.hm(x).sigmoid_()
wh = self.wh(x)
reg = self.reg(x)
return hm, wh, reg
# 初始化转置卷积和BN
|
GHData/victorist_InstallingPyTorchM1GPU/main.py: 21-36
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/jsesr_CSE-GResNet-PyTorch/GResNet.py: 185-213
def forward(self, x):
x, bs, t = modify_input(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# x:[bs*t, c'*M, 1, 1]->[bs*t, c'*M, 1, 1]
x = self.avgpool(x)
# x:[bs*t, c'*M, 1, 1]->[bs, t, c', 1, 1]
x = torch.max(x.view(bs * t, -1, self.M), dim=2)[0]
# x:[bs*t, c', 1, 1]->[bs, t, c']
x = x.reshape(bs, t, -1)
# x:[bs, t, c']->[bs, t, num_classes]
x = self.fc(x)
# x:[bs, t, num_classes]->[bs, num_classes]
x = torch.mean(x, dim=1)
return x
|
GHData/ilex-paraguariensis_torch_modules/resnet3d.py: 77-99
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/HoldemGK_torch_step/conv.py: 46-65
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.pool2(x)
x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
x = self.fc1(x)
x = self.act3(x)
x = self.fc2(x)
x = self.act4(x)
x = self.fc3(x)
return x
|
GHData/sunlanchang_YOLOv1-PyTorch/resnet_yolo.py: 75-96
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/myay_SPICE-Torch/run_fashion_quantized_fi.py: 101-122
def forward(self, x):
#print(self)
x = self.conv1(x)
x = F.max_pool2d(x, 2)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.bn2(x)
x = self.relu(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = self.bn3(x)
x = self.relu(x)
x = self.fc2(x)
x = self.scale(x)
# output = F.log_softmax(x, dim=1)
return x
|
GHData/sunlanchang_YOLOv1-PyTorch/resnet_yolo.py: 179-201
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
# x = self.avgpool(x)
# x = x.view(x.size(0), -1)
# x = self.fc(x)
x = self.conv_end(x)
x = self.bn_end(x)
x = torch.sigmoid(x) #归一化到0-1
# x = x.view(-1,7,7,30)
x = x.permute(0,2,3,1) #(-1,7,7,30)
return x
|
GHData/WildCatFish_--TianChi-heart-beat-signal-classification-PyTorch-CNN/main.py: 178-194
def forward(self, x):
x = x.unsqueeze(dim=1)
x = self.conv_layer1(x)
x = self.conv_layer2(x)
x = self.conv_layer3(x)
x = self.conv_layer4(x)
x = self.conv_layer5(x)
x = self.conv_layer6(x)
x = x.view(x.size(0), -1)
x = self.full_layer(x)
if self.training:
return x
else:
return self.pred_layer(x)
|
GHData/Eureka-JTX_PyTorch_DDP/model.py: 69-90
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += identity
out = self.relu(out)
return out
|
GHData/KellerJordan_ResNet-PyTorch-CIFAR10/model.py: 64-81
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.use_dropout:
out = self.dropout(out)
if self.projection:
residual = self.projection(x)
out += residual
out = self.relu2(out)
return out
# various projection options to change number of filters in residual connection
# option A from paper
|
GHData/safwankdb_ReCoNet-PyTorch/network.py: 166-183
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.res1(x)
x = self.res2(x)
x = self.res3(x)
x = self.res4(x)
x = self.res5(x)
features = x
x = self.deconv1(x)
x = self.deconv2(x)
x = self.deconv3(x)
return (features, x)
|
GHData/tanreinama_XceptionHourgrass---PyTorch/xceptionhourgrass.py: 130-148
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.mish(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.mish(x)
out1 = self.block1(x)
x = self.bn3(out1)
x = self.mish(x)
out2 = self.block2(x)
r = self.sigmoid(out1 + out2)
r = F.interpolate(r, scale_factor=2)
r = self.conv3(r)
r = self.sigmoid(r)
return r
|
GHData/randomMatrix77_PyTorch/cycle_gan.py: 209-232
def forward(self, x):
out1 = self.conv1(x)
out1 = self.pool(out1)
out2 = self.conv2(out1)
out2 = self.pool(out2)
out3 = self.middle(out2)
out3 = self.pool(out3)
out4 = self.upsample(out3)
out4 = torch.cat((out2, out4), dim = 1)
out4 = self.up1(out4)
out5 = self.upsample(out4)
out5 = torch.cat((out1, out5), dim = 1)
out5 = self.up2(out5)
out6 = self.upsample(out5)
out6 = self.up3(out6)
return out6
|
GHData/foamliu_InsightFace-PyTorch/models.py: 135-155
def forward(self, x):
residual = x
out = self.bn0(x)
out = self.conv1(out)
out = self.bn1(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.use_se:
out = self.se(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.prelu(out)
return out
|
GHData/Gerryflap_Torch_MNIST_GAN/mnist_gan.py: 33-52
def forward(self, inp):
x = inp.view(-1, self.latent_size, 1, 1)
x = self.conv_1(x)
x = self.bn_1(x)
x = self.activ(x)
x = self.conv_2(x)
x = self.bn_2(x)
x = self.activ(x)
x = self.conv_3(x)
x = self.bn_3(x)
x = self.activ(x)
x = self.conv_4(x)
x = torch.tanh(x) if not self.use_sine else torch.sin(x)
return x
|
GHData/foamliu_InsightFace-PyTorch/models.py: 206-225
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
x = self.bn3(x)
return x
|
GHData/VishalBalaji321_ResNet-in-PyTorch/resnet_in_pytorch.py: 140-153
def forward(self, x):
x = self.conv_1(x)
# x = self.maxpool(x)
x = self.layer_0(x)
x = self.layer_1(x)
x = self.layer_2(x)
x = self.layer_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/ki-ljl_FedProx-PyTorch/model.py: 25-35
def forward(self, data):
x = self.fc1(data)
x = self.sigmoid(x)
x = self.fc2(x)
x = self.sigmoid(x)
x = self.fc3(x)
x = self.sigmoid(x)
x = self.fc4(x)
x = self.sigmoid(x)
return x
|
GHData/jama1017_PyTorch_Fundamentals/cnn.py: 63-88
def forward(self, x):
# Convolution 1
out = self.cnn1(x)
out = self.relu1(out)
# Max pool 1
out = self.maxpool1(out)
# Convolution 2
out = self.cnn2(out)
out = self.relu2(out)
# Max pool 2
out = self.maxpool2(out)
# Resize
# Original size: (100, 32, 7, 7)
# out.size(0): 100
# New out size: (100, 32*7*7)
out = out.view(out.size(0), -1)
# Linear function (readout)
out = self.fc1(out)
return out
|
GHData/qarchli_PyTorch-CNN-FashionMNIST/model.py: 22-51
def forward(self, tens):
"""
input tensor forward propagation implementation.
"""
# hidden conv1 layer
tens = self.conv1(tens)
tens = F.relu(tens)
tens = F.max_pool2d(tens, kernel_size=2, stride=2)
# hidden conv2 layer
tens = self.conv2(tens)
tens = F.relu(tens)
tens = F.max_pool2d(tens, kernel_size=2, stride=2)
# hidden fc1 layer
# flatten the output from previous conv layers
tens = tens.reshape(-1, 12 * 4 * 4)
tens = self.fc1(tens)
tens = F.relu(tens)
# hidden fc2 layer
tens = self.fc2(tens)
tens = F.relu(tens)
# output layer
# NB: output layer does not get activated because it will be later in the loss computation
tens = self.out(tens)
return tens
|
GHData/Gerryflap_Torch_MNIST_GAN/mnist_wgangp.py: 33-52
def forward(self, inp):
x = inp.view(-1, self.latent_size, 1, 1)
x = self.conv_1(x)
x = self.bn_1(x)
x = self.activ(x)
x = self.conv_2(x)
x = self.bn_2(x)
x = self.activ(x)
x = self.conv_3(x)
x = self.bn_3(x)
x = self.activ(x)
x = self.conv_4(x)
x = torch.tanh(x) if not self.use_sine else torch.sin(x)
return x
|
GHData/ahmedfadhil_DL-PyTorch/cnn_ffnn.py: 49-72
def forward(self, x):
# Convolution 1
out = self.cnn1(x)
out = self.relu1(out)
# Max pool 1
out = self.maxpool1(out)
# Convolution 2
out = self.cnn2(out)
out = self.relu2(out)
# Max pool 2
out = self.maxpool2(out)
# Resize
out = out.view(out.size(0), -1)
# Linear function (readout)
out = self.fc1(out)
return out
# Step4: instantiate model class
|
GHData/SWHL_PyTorchDDP/demo_distributed_launch.py: 58-73
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/Pablo-163_Neural_Network_by_PyTorch/MNIST_task_by_Conv_NN.py: 64-83
def forward(self, x):
x = self.conv1(x)
x = self.act1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.act2(x)
x = self.pool2(x)
x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
x = self.fc1(x)
x = self.act3(x)
x = self.fc2(x)
x = self.act4(x)
x = self.fc3(x)
return x
|
GHData/Kodamayuto2001_PyTorch_CNN/train_cnn_dataaugmentation.py: 36-55
def forward(self,x):
x = self.conv1(x)
# print(x.size())
x = torch.relu(x)
x = self.bn1(x)
x = self.pool(x)
# print(x.size())
x = self.conv2(x)
# print(x.size())
x = torch.relu(x)
x = self.bn2(x)
x = self.pool(x)
# print(x.size())
x = x.view(-1,16*38*38)
x = self.fc1(x)
x = torch.relu(x)
x = self.fc2(x)
return F.log_softmax(x,dim=1)
|
GHData/devbruce_torch-implementations/fpn.py: 50-71
def forward(self, x):
# Bottom-up
c1 = self.make_c1(x)
c2 = self.make_c2(c1)
c3 = self.make_c3(c2)
c4 = self.make_c4(c3)
c5 = self.make_c5(c4)
# Top-down
p5 = self.c5_conv1x1(c5)
p4 = self._upsample_element_wise_add(top=p5, bottom=self.lateral_c4(c4))
p3 = self._upsample_element_wise_add(top=p4, bottom=self.lateral_c3(c3))
p2 = self._upsample_element_wise_add(top=p3, bottom=self.lateral_c2(c2))
# Smooth
p4 = self.smooth_p4(p4)
p3 = self.smooth_p3(p3)
p2 = self.smooth_p2(p2)
return p2, p3, p4, p5
|
GHData/souravsingh_ENet-PyTorch/model.py: 463-482
def forward(self, input):
output = self.input_conv(input)
output = self.input_batch_norm(output)
output = F.relu(output)
output = self.middle_conv(output)
output = self.middle_batch_norm(output)
output = F.relu(output)
output = self.output_conv(output)
output = self.output_batch_norm(output)
return output
|
GHData/Kodamayuto2001_PyTorch_CNN/train_cnn.py: 36-55
def forward(self,x):
x = self.conv1(x)
# print(x.size())
x = torch.relu(x)
x = self.bn1(x)
x = self.pool(x)
# print(x.size())
x = self.conv2(x)
# print(x.size())
x = torch.relu(x)
x = self.bn2(x)
x = self.pool(x)
# print(x.size())
x = x.view(-1,16*38*38)
x = self.fc1(x)
x = torch.relu(x)
x = self.fc2(x)
return F.log_softmax(x,dim=1)
|
GHData/sharathmaidargi_finetune_torchvision/inception.py: 210-229
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7dbl = self.branch7x7dbl_1(x)
branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/marcoseraphin_PyTorch_Course/mnist_sample.py: 21-36
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/ElifCerenGok_PyTorch_Notes/lenet_model.py: 19-34
def forward(self,x):
x = self.conv1(x)
x = self.relu(x)
x = self.pool(x)
x = self.conv2(x)
x = self.relu(x)
x = self.pool(x)
x = self.conv3(x)
x = self.relu(x)
# output of conv3 layer is num_examples x 120 x 1 x 1 but to proceed it to Linear layer we need to convert
# the shape num_examples x 120
x = x.reshape(x.shape[0], -1) # the -1 will concatenate 120x1x1
x = self.fc1(x)
x = self.relu(x)
x = self.output(x)
return x
|
GHData/ricohasgithub_MNIST_PyTorch/CNN.py: 27-46
def forward(self,x):
#first Convolutional layers
out=self.cnn1(x)
#activation function
out=self.relu1(out)
#max pooling
out=self.maxpool1(out)
#first Convolutional layers
out=self.cnn2(out)
#activation function
out=self.relu2(out)
#max pooling
out=self.maxpool2(out)
#flatten output
out=out.view(out.size(0),-1)
#fully connected layer
out=self.fc1(out)
return out
|
GHData/SWHL_PyTorchDDP/demo_multiprocessing.py: 39-54
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/Sbaig3229_PyTorch-implementation-of-L2CS-Net-without-CUDA/model.py: 53-73
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
# gaze
pre_yaw_gaze = self.fc_yaw_gaze(x)
pre_pitch_gaze = self.fc_pitch_gaze(x)
return pre_yaw_gaze, pre_pitch_gaze
|
GHData/shreyas-kowshik_torch-CycleGAN/net.py: 40-49
def forward(self,x):
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
out = self.conv5(out)
out = self.conv6(out)
out = self.conv7(out)
out = self.conv8(out)
return out
|
GHData/yshiyi_Deep-Neural-Networks-with-PyTorch/Chapter09_05CNNwithMNIST.py: 119-130
def forward(self, x):
x = self.cnn1(x)
x = torch.relu(x)
x = self.maxpool1(x)
x = self.cnn2(x)
x = torch.relu(x)
x = self.maxpool2(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
return x
# Outputs in each steps
|
GHData/BoriaK_AutoEncoder_PyTorch/models.py: 220-234
def forward(self, x):
x = self.unflat(x)
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = self.l5(x)
x = self.l6(x)
x = self.fl(x)
# x = self.fc1(self.flat(x))
return x
|
GHData/BoriaK_AutoEncoder_PyTorch/models_from_Avi.py: 201-215
def forward(self, x):
x = self.unflat(x)
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = self.l5(x)
x = self.l6(x)
x = self.fl(x)
# x = self.fc1(self.flat(x))
return x
|
GHData/congffu_FCN-Implementaion-PyTorch/FCN.py: 55-79
def forward(self, x): # 352, 480, 3
s1 = self.stage1(x) # 176, 240, 64
s2 = self.stage2(s1) # 88, 120, 128
s3 = self.stage3(s2) # 44, 60, 256
s4 = self.stage4(s3) # 22, 30, 512
s5 = self.stage5(s4) # 11, 15, 512
scores1 = self.scores1(s5) # 11, 15, 12
s5 = self.upsample_2x_1(s5) # 22, 30, 512
add1 = s4 + s5 # 22, 30, 512
scores2 = self.scores2(add1) # 22, 30, 12
add1 = self.conv_trans1(add1) # 22, 30, 256
add1 = self.upsample_2x_2(add1) # 44, 60, 256
add2 = add1 + s3 # 44, 60, 256
add2 = self.conv_trans2(add2) # 44,60,12
scores3 = self.upsample_8x(add2) # 352, 480, 12
return scores3
|
GHData/limberc_DL-without-Weight-Transport-PyTorch/resnet.py: 131-143
def forward(self, x):
output = self.conv1(x)
output = self.conv2_x(output)
output = self.conv3_x(output)
output = self.conv4_x(output)
output = self.conv5_x(output)
output = self.avg_pool(output)
output = output.view(output.size(0), -1)
output = self.fc(output)
return output
|
GHData/mindingyao_MobileNetV2/ResNet.py: 91-101
def forward(self, x):
x = self.pre(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/stabgan_CNN-classification-of-MNIST-dataset-using-pyTorch/cnn.py: 61-86
def forward(self, x):
# Convolution 1
out = self.cnn1(x)
out = self.relu1(out)
# Max pool 1
out = self.maxpool1(out)
# Convolution 2
out = self.cnn2(out)
out = self.relu2(out)
# Max pool 2
out = self.maxpool2(out)
# Resize
# Original size: (100, 32, 7, 7)
# out.size(0): 100
# New out size: (100, 32*7*7)
out = out.view(out.size(0), -1)
# Linear function (readout)
out = self.fc1(out)
return out
|
GHData/kenandaoerdect_ResNet_PyTorch/resnet50_101_152.py: 69-80
def forward(self, input):
out = self.conv1(input)
out = self.conv2_x(out)
out = self.conv3_x(out)
out = self.conv4_x(out)
out = self.conv5_x(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/PacktPublishing_Deep-Learning-Projects-with-PyTorch/cnn.py: 62-82
def forward(self, x):
# C1
out = self.cnn1(x)
out = self.relu1(out)
#Maxpool1
out = self.maxpool1(out)
#c1
out = self.cnn2(out)
out = self.relu2(out)
#Maxpool1
out = self.maxpool2(out)
out = out.view(out.size(0), -1)
#Linear Function
out = self.fc1(out)
return out
|
GHData/goksinan_Intro-to-PyTorch/eg_12_savingModel.py: 32-45
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.hidden_1(x)
x = self.relu(x)
x = self.dropout(x)
x = self.hidden_2(x)
x = self.relu(x)
x = self.dropout(x)
x = self.output(x)
x = self.logsoftmax(x)
return x
# Get the data
|
GHData/goksinan_Intro-to-PyTorch/eg_11_dropout.py: 32-45
def forward(self, x):
# Pass the input tensor through each of our operations
x = self.hidden_1(x)
x = self.relu(x)
x = self.dropout(x)
x = self.hidden_2(x)
x = self.relu(x)
x = self.dropout(x)
x = self.output(x)
x = self.logsoftmax(x)
return x
# Get the data
|
GHData/mohak1_CNN-in-PyTorch/code.py: 50-60
def forward(self, x):
out = self.cnn1(x)
out = self.relu1(out)
out = self.maxpool1(out)
out = self.cnn2(out)
out = self.relu2(out)
out = self.maxpool2(out)
out = out.view(out.size(0), -1)
out = self.fcl(out)
return out
|
GHData/JJASMINE22_DANN_PyTorch/net.py: 31-44
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.leak1(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.leak2(x)
output = self.pool2(x)
return output
|
GHData/foamliu_MobileFaceNet-PyTorch/mobilefacenet.py: 169-180
def forward(self, x):
x = self.conv1(x)
x = self.dw_conv(x)
x = self.features(x)
x = self.conv2(x)
x = self.gdconv(x)
x = self.conv3(x)
x = self.bn(x)
x = x.view(x.size(0), -1)
return x
|
GHData/domisProgrammingMemes_PyTorchTut/AutoencoderTut.py: 86-98
def forward(self, x):
activation = self.encoder_hidden_layer(x)
activation = torch.relu(activation)
code = self.encoder_output_layer(activation)
code = torch.sigmoid(code)
activation = self.decoder_hidden_layer(code)
activation = torch.relu(activation)
activation = self.decoder_output_layer(activation)
reconstructed = torch.sigmoid(activation)
return reconstructed
# instantiate the AE
|
GHData/loui0620_pyTorch_bean_classifier/Models.py: 111-124
def forward(self, x):
x = self.conv1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/bo-10000_ResNet-D_PyTorch/resnetD_3d.py: 220-237
def forward(self, x):
# Input stem
x = self.input_stem(x)
x = self.maxpool(x)
# Stages 1~4
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# Output layer
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/Nebula4869_PyTorch-gender-age-estimation/model.py: 87-109
def forward(self, x):
# [batch_size * seq_len, 1, input_size, input_size]
x = self.conv1(x)
# [batch_size * seq_len, 64, input_size // 2, input_size // 2]
x = self.max_pool(x)
# [batch_size * seq_len, 64, input_size // 4, input_size // 4]
x = self.layer1(x)
# [batch_size * seq_len, 64, input_size // 4, input_size // 4]
x = self.layer2(x)
# [batch_size * seq_len, 64, input_size // 8, input_size // 8]
x = self.layer3(x)
# [batch_size * seq_len, 64, input_size // 16, input_size // 16]
x = self.layer4(x)
# [batch_size * seq_len, 64, input_size // 32, input_size // 32]
x = self.global_avg_pool(x)
# [batch_size * seq_len, 512 * expansion, 1, 1]
x = x.view(x.size(0), -1)
# [batch_size * seq_len, 512 * expansion]
x = self.fc(x)
# [batch_size * seq_len, num_classes]
return x
|
GHData/bo-10000_ResNet-D_PyTorch/resnetD.py: 220-237
def forward(self, x):
# Input stem
x = self.input_stem(x)
x = self.maxpool(x)
# Stages 1~4
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# Output layer
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/KellerJordan_ResNet-PyTorch-CIFAR10/model.py: 25-36
def forward(self, x):
out = self.conv1(x)
out = self.norm1(out)
out = self.relu1(out)
out = self.layers1(out)
out = self.layers2(out)
out = self.layers3(out)
out = self.avgpool(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/anujdutt9_SqueezeNet-PyTorch/model.py: 43-65
def forward(self, x):
# Input - Squeeze Layer
x = self.squeeze1x1(x)
x = self.bn1(x)
x = self.relu(x)
# Expand Layer
# 1x1 Convolution
out_1 = self.expand1x1(x)
out_1 = self.bn2(out_1)
# 3x3 Convolution
out_2 = self.expand3x3(x)
out_2 = self.bn3(out_2)
# Before sending the output, the outputs of expand layers is concatenated
out = torch.cat([out_1, out_2], dim=1)
out = self.relu(out)
return out
# SqueezeNet Model Class
|
GHData/Smorodov_PRNet_PyTorch_v2/resfcn256.py: 50-72
def forward(self, x):
# shortcut = x
#(_, _, _, x_planes) = x.size()
# if self.stride != 1 or x_planes != self.out_planes:
shortcut = self.shortcut_conv(x)
x = self.conv1(x)
#x = self.bn1(x)
x = self.drop1(x)
x = self.conv2(x)
#x = self.bn2(x)
x = self.drop2(x)
x = self.conv3(x)
#x = self.bn3(x)
x = self.drop3(x)
x += shortcut
x = self.activation_fn(x)
return x
|
GHData/odegeasslbc_Self-Supervised-Sketch-to-Image-Synthesis-PyTorch/models.py: 162-175
def get_feats(self, image):
feat = self.sf_256(image)
feat = self.sf_128(feat)
feat = self.sf_64(feat)
feat_32 = self.sf_32(feat)
feat_16 = self.sf_16(feat_32)
feat_8 = self.sf_8(feat_16)
feat_32 = self.sfv_32(feat_32)
feat_16 = self.sfv_16(feat_16)
feat_8 = self.sfv_8(feat_8)
return feat_32, feat_16, feat_8
|
GHData/ZengyuanYu_PyTorch_60minutes/ResNet.py: 100-112
def forward(self, x):
out = self.conv(x)
out = self.bn(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/ZengyuanYu_PyTorch_60minutes/model.py: 59-79
def forward(self, x):
out = self.layer1(x)
#print('layer1', out.size())
out = self.layer2(out)#shape(batch_size,32,7,7)
#print('layer2', out.size())
out = self.layer3(out)
#print('layer3', out.size())
out = self.layer4(out)
#print('layer4', out.size())
out = self.layer5(out)
#print('layer5', out.size())
out = out.view(out.size(0), -1)#faltten 将数据out铺展 (shape batch_size, 32*7*7)
#out = self.fc1(out)
out = self.layer6(out)
#print('layer6', out.size())
out = self.fc1(out)
out = self.fc2(out)
#print(out)
#print('layer7', out.size())
return out
|
GHData/souravsingh_ENet-PyTorch/model.py: 343-364
def forward(self, input):
output = self.input_conv(input)
output = self.input_batch_norm(output)
output = F.relu(output)
output = self.middle_conv(output)
output = self.middle_batch_norm(output)
output = F.relu(output)
output = self.output_conv(output)
output = self.output_batch_norm(output)
output = self.dropout(output)
return output
|
GHData/rohitgajawada_PyTorchLIP/deeplab_resnet.py: 171-183
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
return x
|
GHData/Knight-Antonio_CosFace-PyTorch/net.py: 157-171
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/tqlong_torchtut/tut2_model.py: 66-77
def forward(self, x):
out = self.conv(x)
out = self.bn(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/Hoshi-masahito_DeepLearning_in_PyTorch/CNN-mnist.py: 76-87
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = self.layer6(out)
out = out.reshape(out.size(0), -1)
out = self.fc1(out)
out = self.fc2(out)
return out
|
GHData/shambhavimalik_Model-PyTorch-Implementations/vgg16_pytorch.py: 98-109
def forward(self, x):
conv1 = self.layer1(x)
conv2 = self.layer2(conv1)
conv3 = self.layer3(conv2)
conv4 = self.layer4(conv3)
conv5 = self.layer5(conv4)
fc1_input = conv5.view(conv5.size(0), -1)
fc_1 = self.layer6(fc1_input)
fc_2 = self.layer7(fc_1)
fc_out = self.layer8(fc_2)
return fc_out
|
GHData/chuanli11_WCT-PyTorch/ae.py: 119-130
def forward(self,x):
out = self.reflecPad5(x)
out = self.conv5(out)
out = self.relu5(out)
out = self.unpool(out)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
out = self.reflecPad7(out)
out = self.conv7(out)
return out
|
GHData/DableUTeeF_HiResTorch/hardcodedmodels.py: 40-52
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = F.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = F.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = F.relu(out)
return out
|
GHData/developer0hye_SKNet-PyTorch/sknet.py: 135-146
def forward(self, x):
fea = self.basic_conv(x)
fea = self.maxpool(fea)
fea = self.stage_1(fea)
fea = self.stage_2(fea)
fea = self.stage_3(fea)
fea = self.stage_4(fea)
fea = self.gap(fea)
fea = torch.squeeze(fea)
fea = self.classifier(fea)
return fea
|
GHData/pradeepsk-23_PyTorch/CIFAR10%20-%20ResNet.py: 87-98
def forward(self, x):
out = self.conv(x)
out = self.bn(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/JJASMINE22_DANN_PyTorch/net.py: 122-138
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.leak1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.leak2(x)
x = self.max_pool(x).view(-1, 64)
x = self.linear(x)
output = torch.sigmoid(x)
return output
|
GHData/YimingZzz_PyTorch_PG2/resnet.py: 102-114
def forward(self, x):
out = self.conv(x)
out = self.bn(out)
out = self.relu(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/YiPrograms_CatDogClassifier/net.py: 61-71
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = self.fc2(x)
x = self.out(x)
return x
|
GHData/shaynaor_ResNet_PyTorch/model.py: 131-145
def forward(self, x):
x = self.conv1(x)
x = self.max_pool(x)
x = self.conv2_x(x)
x = self.conv3_x(x)
x = self.conv4_x(x)
x = self.conv5_x(x)
x = self.avg_pool(x)
x = x.flatten(1)
x = self.fc(x)
return x
|
GHData/hs366399_Image-Super-Resolution-Using-VAE-GAN-with-PyTorch/model.py: 165-177
def forward(self, x):
x = self.trans_conv_1(x)
x = self.layer_1(x)
x = self.trans_conv_2(x)
x = self.layer_2(x)
x = self.trans_conv_3(x)
x = self.layer_3(x)
x = self.trans_conv_4(x)
x = self.trans_conv_5(x)
x = self.output_conv(x)
return x
|
GHData/Unicorn-acc_PyTorch_Study_Record/use_nnseq.py: 25-36
def forward(self,x):
x = self.conv1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.maxpool2(x)
x = self.conv3(x)
x = self.maxpool3(x)
x = self.flatten(x)
x = self.linear1(x)
x = self.linear2(x)
return x
|
GHData/kir3i_MLwithPyTorch/15_ResNet.py: 151-170
def forward(self, x):
# 1, 3, 32, 32
x = self.conv1(x)
# 1, 16, 32, 32
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
# 1, 128, 32, 32
x = self.layer2(x)
# 1, 256, 32, 32
x = self.layer3(x)
# 1, 512, 16, 16
x = self.layer4(x)
# 1, 1024, 8, 8
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/skrish13_CrossTransformers-PyTorch/resnet.py: 201-220
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/hs366399_Image-Super-Resolution-Using-VAE-GAN-with-PyTorch/model.py: 248-261
def forward(self, x):
x = self.main_conv_1(x)
x = self.main_conv_2(x)
x = self.layer_1(x)
x = self.main_conv_3(x)
x = self.layer_2(x)
x = self.main_conv_4(x)
x = self.layer_3(x)
x = self.main_conv_5(x)
x = self.main_conv_6(x)
x = self.main_conv_7(x)
x = x.view(x.shape[0], -1)
x = self.main_conv_8(x)
return x
|
GHData/vidyadhariGithub_NeuralNetworks_project1/kuzu.py: 57-69
def forward(self, x):
x = self.conv1(x) #convolution layer
x = F.relu(x)
x = self.conv2(x) #convolution layer
x = F.relu(x)
x = self.pool(x) #max pooling
y = x.view(x.shape[0], -1)
y = self.fc1(y) #fully connected layer
y = F.relu(y)
y = self.fc2(y) #output layer
out = F.log_softmax(y, dim=1)
return out
#return 0 # CHANGE CODE HERE
|
GHData/DGenady_gw_torch/resnetNoBN.py: 121-137
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/Ravitha_pyTorch_Examples/Unet.py: 119-132
def forward(self, x):
x = self.layer1(x)
up1 = self.upsample1(x)
x = self.layer2(x)
up2 = self.upsample2(x)
x = self.layer3(x)
up3 = self.upsample3(x)
x = self.layer4(x)
up4 = self.upsample4(x)
merge = torch.cat([up1,up2,up3,up4], dim=1)
merge = self.conv1k(merge)
#out = self.sigmoid(merge)
return merge
|
GHData/XueJiang16_ssl-torch/net.py: 80-106
def forward(self, x):
residual = x
out = self.bn0(x)
out = self.relu(out)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv3(out)
# out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
# residual = torch.cat((residual, residual), 1)
out += residual
out = self.relu(out)
return out
|
GHData/Ravitha_pyTorch_Examples/Ex1_Seg_FCN.py: 119-132
def forward(self, x):
x = self.layer1(x)
up1 = self.upsample1(x)
x = self.layer2(x)
up2 = self.upsample2(x)
x = self.layer3(x)
up3 = self.upsample3(x)
x = self.layer4(x)
up4 = self.upsample4(x)
merge = torch.cat([up1,up2,up3,up4], dim=1)
merge = self.conv1k(merge)
#out = self.sigmoid(merge)
return merge
|
GHData/JimpeiYamamoto_myTorch/CAE.py: 63-79
def forward(self, encode):
x = self.dense1(encode)
x = F.dropout(x, training=self.training)
x = F.elu(x)
x = x.view(x.size(0), 8, 50, 50)
x = self.deconv3(x)
x = self.bn3(x)
x = F.elu(x)
x = self.upsample2(x)
x = self.deconv2(x)
x = self.bn2(x)
x = F.elu(x)
x = self.upsample1(x)
x = self.deconv1(x)
x = torch.sigmoid(x)
return x
|
GHData/lhsheild_torchentrance/basic_classification.py: 29-47
def forward(self, x): # 定义数据流向
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = x.view(-1, 16 * 28 * 28)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
|
GHData/ZhichaoOuyang_PyTorch_DDP_Demo/runMNIST.py: 25-40
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/chaozhong2010_SENet-PyTorch/se_resnet.py: 170-187
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/lhsheild_torchentrance/basic.py: 22-40
def forward(self, x): # 定义数据流向
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = x.view(-1, 16 * 28 * 28)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
|
GHData/shubhamjn1_Classification-pipeline-torch/model.py: 29-47
def forward(self, x):
x = self.layer1(x)
x = self.batchnorm1(x)
x = self.relu(x)
x = self.layer2(x)
x = self.batchnorm2(x)
x = self.relu(x)
x = self.dropout(x)
x = self.layer3(x)
x = self.batchnorm3(x)
x = self.relu(x)
x = self.dropout(x)
x = self.layer_out(x)
return x
|
GHData/tristandb_EfficientDet-PyTorch/retinanet.py: 87-107
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
# out is B x C x W x H, with C = 4*num_anchors
out = out.permute(0, 2, 3, 1)
return out.contiguous().view(out.shape[0], -1, 4)
|
GHData/ronithbinny_PyTorch/PyTorch-CNN-Faces-Load_model.py: 49-65
def forward(self, x):
out = self.conv_1(x)
out = F.relu(out)
out = self.pool_1(out)
out = self.conv_2(out)
out = F.relu(out)
out = self.pool_2(out)
out = self.linear_1(out.view(-1, 7*7*16))
out = self.linear_2(out)
out = self.linear_3(out)
out = self.linear_4(out)
return F.softmax(out, dim=1)
|
GHData/ronithbinny_PyTorch/PyTorch-CNN-Faces-Final.py: 114-130
def forward(self, x):
out = self.conv_1(x)
out = F.relu(out)
out = self.pool_1(out)
out = self.conv_2(out)
out = F.relu(out)
out = self.pool_2(out)
out = self.linear_1(out.view(-1, 7*7*16))
out = self.linear_2(out)
out = self.linear_3(out)
out = self.linear_4(out)
return F.softmax(out, dim=1)
|
GHData/rllelmz_PyTorch_Project/CNN_Pytorch_Ge%CC%81ne%CC%81rique.py: 114-130
def forward(self, x):
out = self.conv_layer1(x)
out = self.conv_layer2(out)
out = self.max_pool1(out)
out = self.conv_layer3(out)
out = self.conv_layer4(out)
out = self.max_pool2(out)
out = torch.flatten(out, 1)
out = self.fc1(out)
out = self.relu1(out)
out = self.fc2(out)
return out
# Création du modèle
|
GHData/ISwordLion_ResNets_PyTorch_IR_Pedestrian_Images/drn_anlatim.py: 170-184
def forward(self,x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0),-1)
x = self.fc(x)
return x
|
GHData/XueJiang16_ssl-torch/net.py: 40-61
def forward(self, x):
residual = x
out = self.bn0(x)
out = self.relu(out)
# out = self.dropout(out)
out = self.conv1(out)
out = self.bn1(out)
out = self.relu(out)
out = self.dropout(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
# residual = torch.cat((residual,residual),1)
out += residual
out = self.relu(out)
return out
|
GHData/dollarkillerx_PyTorchStudy/three.py: 19-36
def forward(self,x): # 定义数据流向
x = self.conv1(x)
x = F.relu(x) # 隐藏层 激活函数 激活
x = self.conv2(x)
x = F.relu(x)
x = x.view(-1,16*28*28) # 更改形状
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
|
GHData/Grzetan_HandPoseEstimationPyTorch/model.py: 66-81
def forward(self, X):
X = self.conv_in(X)
X = self.bn1(X)
X = self.mish(X)
X = self.pad(X)
X = self.depthwise_conv(X)
X = self.bn1(X)
X = self.mish(X)
X = self.conv_out(X)
X = self.bn2(X)
X = self.mish(X)
return X
|
GHData/rczhen_PyTorch-Deep-Learning-Library/resnet18.py: 117-149
def forward(self, x):
"""
Define network architecture.
Focus high level structure, while leaving the layer details to __init__().
"""
# stem layers
# print("Input --", x.shape)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# print("#3 --", x.shape)
# blocks
x = self.layer1(x)
# print("#4 --", x.shape)
x = self.layer2(x)
# print("#5 --", x.shape)
x = self.layer3(x)
# print("#6 --", x.shape)
x = self.layer4(x)
# print("#7 --", x.shape)
# head layers / classifier
x = self.avg_pool(x)
# print("#8 --", x.shape)
x = x.flatten(1)
# print("#9 --", x.shape)
x = self.classifier(x)
# print("Output --", x.shape)
return x
|
GHData/pbehjatii_FENet-PyTorch/ops.py: 140-156
def forward(self, x):
#Low-Frequency Path
path_1 = self.path_1(x)
path_1 = self.relu(path_1)
path_1 = self.k1(path_1)
path_1 = self.relu(path_1)
#High-Frequency Path
path_2 = self.path_2(x)
path_2 = self.relu(path_2)
path_2 = self.HConv(path_2)
path_2 = self.relu(path_2)
output = self.conv(torch.cat([path_1, path_2], dim=1))
output = output + x
return output
|
GHData/developer0hye_PyTorch-Darknet53/model.py: 50-67
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out = self.residual_block1(out)
out = self.conv3(out)
out = self.residual_block2(out)
out = self.conv4(out)
out = self.residual_block3(out)
out = self.conv5(out)
out = self.residual_block4(out)
out = self.conv6(out)
out = self.residual_block5(out)
out = self.global_avg_pool(out)
out = out.view(-1, 1024)
out = self.fc(out)
return out
|
GHData/DeepLatte_DCTTS-torch/networks.py: 355-368
def forward(self, input):
input = input.transpose(1,2) # (B, n_mels, T/r)
ssrnOut = self.Conv1st(input)
ssrnOut = self.Hc1(ssrnOut)
ssrnOut = self.DcHcTwice1(ssrnOut)
ssrnOut = self.DcHcTwice2(ssrnOut)
ssrnOut = self.Conv2nd(ssrnOut)
ssrnOut = self.HcTwice(ssrnOut)
ssrnOut = self.Conv3rd(ssrnOut)
ssrnOut = self.ConvTwice(ssrnOut)
ssrnOut = self.ConvLast(ssrnOut)
return ssrnOut # (B, n_mag, T)
|
GHData/chaozhong2010_SENet-PyTorch/resnext.py: 132-149
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/VecihiAsim_PyTorch-Fully-Convolutional-Image-Classification/FullyConvolutionalResnet18.py: 40-59
def _forward_impl(self, x):
# Standard forward for resnet18
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
# Notice, there is no forward pass
# through the original fully connected layer.
# Instead, we forward pass through the last conv layer
x = self.last_conv(x)
return x
|
GHData/mzhang367_DCDH-PyTorch/model.py: 226-242
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.view(x.size(0), -1)
x = self.drop(x)
x = self.fc(x)
x = self.bn(x)
# x = self.drop(x)
x = self.logits(x)
out = self.bn_last(x)
return out
|
GHData/Lornatang_FAN-PyTorch/model.py: 156-174
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.conv_block1(x)
out = self.resnet_block1(out)
out = self.conv_block2(out)
out = self.resnet_block2(out)
out = self.conv_block3(out)
out = self.resnet_block3(out)
out = self.conv_block4(out)
out = self.resnet_block4(out)
out = self.conv_block5(out)
out = self.conv_block6(out)
return out
|
GHData/HyperGDX_BVP_torch/ae_model.py: 27-42
def forward(self, x):
y = self.conv1(x)
y = F.relu(y)
y = self.bn1(y)
y = self.conv2(y)
y = F.relu(y)
y = self.bn2(y)
# y = self.pool1(y)
y = self.flat1(y)
y = self.linear1(y)
y = F.relu(y)
# y = F.dropout(y, self.dropout_rate)
y = self.linear2(y)
return y
|
GHData/dbbbbm_f-AnoGAN-PyTorch/wgan64x64.py: 220-237
def forward(self, input):
output = self.ln1(input.contiguous())
output = output.view(-1, 8*self.dim, 4, 4)
output = self.rb1(output)
output = self.rb2(output)
output = self.rb3(output)
output = self.rb4(output)
output = self.bn(output)
output = self.relu(output)
output = self.conv1(output)
output = self.tanh(output)
# output = output.view(-1, OUTPUT_DIM)
return output
|
GHData/harimaruthachalam_PyTorchNNs/convNN.py: 47-74
def forward(self, x):
# Convolution 1
out = self.cnn1(x)
out = self.relu(out)
# Max pool 1
out = self.maxpool(out)
# Convolution 2
out = self.cnn2(out)
out = self.relu(out)
# Max pool 2
out = self.maxpool(out)
# Resize
# Original size: (100, 32, 7, 7)
# out.size(0): 100
# New out size: (100, 32*7*7)
out = out.view(out.size(0), -1)
# Linear function (readout)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
return out
|
GHData/haotian-liu_torch-localization/Resnet.py: 85-101
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.bn1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/jang1suh_check_GPU_experiments_PyTorch/mnist.py: 28-41
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
return x
|
GHData/zinsmatt_PyTorch-Examples/resNet.py: 103-116
def forward(self, x):
x = self.conv_init(x)
x = self.bn_init(x)
x = self.pool_init(x)
x = self.backbone(x)
x = self.avg(x)
x = x.flatten(1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
|
GHData/bhuminiecki_TorchVoice/model.py: 20-33
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool1d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
x = F.softmax(x, dim=0)
return x
|
GHData/mnmjh1215_CSN-PyTorch/csn.py: 97-115
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.max_pool(out)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.avg_pool(out)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/mnmjh1215_CSN-PyTorch/csn.py: 38-57
def forward(self, x):
shortcut = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += shortcut
out = self.relu(out)
return out
|
GHData/mnmjh1215_CSN-PyTorch/resnet3d.py: 67-86
def forward(self, x):
shortcut = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += shortcut
out = self.relu(out)
return out
|
GHData/pradeepsk-23_PyTorch/CIFAR10%20-%20ResNet50.py: 82-97
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.reshape(x.shape[0], -1)
x = self.fc(x)
return x
|
GHData/oded282_PyTorch_CNN_Model/ex4.py: 49-63
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = out.reshape(out.size(0), -1)
out = self.drop_out(out)
out = self.fc1(out)
out = self.fc2(out)
out = self.fc3(out)
out = self.fc4(out)
out = F.log_softmax(out, dim=1)
return out
|
GHData/tscianmarello_CNNPyTorchAndPyTorchLightningNMIST/ConvolutionalNeuralNetworkPyTorch.py: 41-55
def forward(self, x):
result = self.conv1_1(x)
result = self.r1(result)
result = self.mp1(result)
result = self.conv1_2(result)
result = self.r2(result)
result = self.mp2(result)
result = self.conv1_3(result)
result = self.r3(result)
result = self.mp3(result)
result = result.view(result.size(0), -1)
result = self.linear_trans(result)
return result
#Returns the model architecture name
|
GHData/tscianmarello_CNNPyTorchAndPyTorchLightningNMIST/ConvolutionalNeuralNetworkPyTorchLightning.py: 33-50
def forward(self, x):
result = self.conv1_1(x)
result = self.r1(result)
result = self.mp1(result)
result = self.conv1_2(result)
result = self.r2(result)
result = self.mp2(result)
result = self.conv1_3(result)
result = self.r3(result)
result = self.mp3(result)
result = result.view(result.size(0), -1)
result = self.linear_trans(result)
return result
#This is the training_step necessary for PyTorch Lightning. Contains the loss function in loss, the activation
#functions are called in output and makes a log of the information under the Lightning_logs folder of each run.
#Returns the loss.
|
GHData/hiepbkhn_PolyLoss-PyTorch/fashion_mnist.py: 36-51
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
# output = F.log_softmax(x, dim=1)
# return output
return x
|
GHData/goys94_MalariaNet-for-PyTorch/final.py: 40-57
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = out.view(out.size(0),-1)
out = self.fc1(out)
out = F.relu(out)
out = self.drop(out)
out = self.fc2(out)
out = F.relu(out)
out = self.drop(out)
out = self.fc3(out)
return out
#############################
|
GHData/8yrce_PyTorch-vs-TensorFlow/PyTorch-MNIST.py: 53-80
def forward(self, input_t):
"""
The structure and order of our network layers
:param input_t: the input tensor
:return: output: the output prediction from softmax
"""
# pass input tensor to our input conv layer
input_t = self.conv_1(input_t)
input_t = self.relu_1(input_t)
input_t = self.max_pool_1(input_t)
# pass output of first layer through second conv layer
input_t = self.conv_2(input_t)
input_t = self.relu_2(input_t)
input_t = self.max_pool_2(input_t)
# pass output of second layer through our fully connected layers ( so we can converge on an output )
input_t = flatten(input_t, 1)
input_t = self.fc_1(input_t)
input_t = self.relu_3(input_t)
# pass our fully connected output to a final fc layer and predict with softmax
input_t = self.fc_2(input_t)
output = self.softmax(input_t)
return output
|
GHData/ShaoXiang23_Hourglass_PyTorch/model.py: 116-131
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.r1(x)
x = self.maxpool(x)
x = self.r4(x)
x = self.r5(x)
x = self.hg(x)
x = self.res_list[0](x)
x = self.res_list[1](x)
x = self.cbr(x)
return x
|
GHData/mindingyao_MobileNetV2/ResNet.py: 43-59
def forward(self, x):
residual = x if self.shortcut == None else self.shortcut(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out = self.relu(out)
out += residual
return out
|
GHData/miraclewkf_SENet-PyTorch/se_resnext.py: 116-132
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/miraclewkf_SENet-PyTorch/se_resnet.py: 170-187
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/LiaoWC_VAE-MNIST-PyTorch/vae.py: 104-121
def forward(self, x):
x = x.view(-1, self.z_dim, 1, 1)
x = self.transpose_conv0(x)
x = self.relu0(x)
x = self.transpose_conv1(x)
x = self.relu1(x)
x = self.transpose_conv2(x)
x = self.relu2(x)
x = self.transpose_conv3(x)
x = self.relu3(x)
x = self.transpose_conv4(x)
x = self.sigmoid(x)
return x
#
# x = x.view(x.shape[0], -1)
# return self.fcs(x).view(x.shape[0], 1, 28, 28)
|
GHData/LiaoWC_VAE-MNIST-PyTorch/show.py: 87-104
def forward(self, x):
x = x.view(-1, self.z_dim, 1, 1)
x = self.transpose_conv0(x)
x = self.relu0(x)
x = self.transpose_conv1(x)
x = self.relu1(x)
x = self.transpose_conv2(x)
x = self.relu2(x)
x = self.transpose_conv3(x)
x = self.relu3(x)
x = self.transpose_conv4(x)
x = self.sigmoid(x)
return x
#
# x = x.view(x.shape[0], -1)
# return self.fcs(x).view(x.shape[0], 1, 28, 28)
|
GHData/chuanli11_WCT-PyTorch/ae.py: 79-92
def forward(self,x):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool = self.relu3(out)
out,pool_idx = self.maxPool(pool)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
return out
|
GHData/diaomin_PyTorch-implementation-of-GhostNet/ghostnet.py: 204-218
def forward(self, x):
x = self.conv_stem(x)
x = self.bn1(x)
x = self.act1(x)
x = self.blocks(x)
x = self.global_pool(x)
x = self.conv_head(x)
x = self.act2(x)
x = x.view(x.size(0), -1)
if self.dropout > 0.:
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.classifier(x)
return x
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123/network_MobileNetv2.py: 139-154
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 32 * 112 * 112
x = self.conv2(x) # out: B * 16 * 112 * 112
x = self.conv3(x) # out: B * 24 * 56 * 56
x = self.conv4(x) # out: B * 32 * 28 * 28
x = self.conv5(x) # out: B * 64 * 14 * 14
x = self.conv6(x) # out: B * 96 * 14 * 14
x = self.conv7(x) # out: B * 160 * 7 * 7
x = self.conv8(x) # out: B * 320 * 7 * 7
x = self.conv9(x) # out: B * 1280 * 7 * 7
# classifier
x = x.mean(3).mean(2) # out: B * 1280 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/Samuel-Bachorik_MNIST_PyTorch_concurrent-loader/mnist_model.py: 22-39
def forward(self, xb):
xb = self.conv1(xb)
xb = self.activation1(xb)
xb = self.conv2(xb)
xb = self.activation2(xb)
xb = self.conv3(xb)
xb = self.activation3(xb)
xb = self.conv4(xb)
xb = self.activation4(xb)
xb = xb.reshape(-1, 128 * 5 * 5)
xb = self.linear1(xb)
xb = self.soft(xb)
return xb
|
GHData/hisham-kottayil_torchserve_demo_v2/mnist.py: 19-31
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.drop(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = self.drop2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/soumickmj_TorchEsegeta/model_copy.py: 144-159
def forward(self, x):
input_shape = x.shape[-2:]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool1(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False)
#op_max = torch.argmax(x, dim=1, keepdim=True)
#selected_inds = torch.zeros_like(x[0:]).scatter_(1, op_max, 1)
#return (x * selected_inds).sum(dim=(2, 3))
return x
|
GHData/siyaroner_PyTorchTutorial/IntelImageClassification.py: 126-193
def forward(self,input):
output=self.conv1(input)
output=self.bn1(output)
output=self.relu1(output)
output=self.pool(output)
output=self.conv2(output)
output=self.relu2(output)
output=self.conv3(output)
output=self.bn3(output)
output=self.relu3(output)
#Above output will be in matrix form, with shape (256,32,75,75)
output=output.view(-1,32*75*75)
output=self.fc(output)
return output
# class IntelImageClassifcation(nn.Module):
# def __init__(self,num_classes):
# super(IntelImageClassifcation,self).__init__()
# #output size after convolution filter is ((wide-filter+2padding)/s)+1
# #input shape (256,3,150,150) (batch_size, channel,wide,height)
# self.conv1=nn.Conv2d(in_channels=3,out_channels=12,kernel_size=3,stride=1, padding=1)
# #new shape (256,12,150,150)
# self.bn1=nn.BatchNorm2d(num_features=12)
# self.relu1=nn.ReLU()
# self.pool=nn.MaxPool2d(kernel_size=2)
# #Reduce the image size be factor 2
# # shape=(256,12,75,75)
# self.conv2=nn.Conv2d(in_channels=12,out_channels=20,kernel_size=3,stride=1,padding=1)
# #shape=(256,20,75,75)
# self.relu2=nn.ReLU()
# #shape=(256,20,75,75)
# self.conv3=nn.Conv2d(in_channels=20,out_channels=32,kernel_size=3,stride=1,padding=1)
# #new shape (256,32,75,75)
# self.bn2=nn.BatchNorm2d(num_features=32)
# self.relu3=nn.ReLU()
# # shape=(256,32,75,75)
# self.fc=nn.Linear(in_features=75*75*32,out_features=num_classes)
# # feed forward function
# def forward(self,input):
# output=self.conv1(input)
# #output=self.bn1(output)
# output=self.relu1(output)
# output=self.pool(output)
# output=self.conv2(output)
# output=self.relu2(output)
# output=self.conv3(output)
# #output=self.bn2(output)
# output=self.relu3(output)
# # output shape will be (256,32,75,75)
# output=output.view(-1,75*75*32)
# output=self.fc(output)
# return output
|
GHData/vvagias_torch-dist/net.py: 18-35
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool(x)
x = self.dropout1(x)
x = self.flatten(x)
x = self.fc1(x)
x = self.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
return self.log_softmax(x)
|
GHData/ninfueng_bnn-bn-free/test_mnist.py: 88-104
def forward(self, x):
x = x.view(-1, 28 * 28)
x = self.fc1(x)
x = self.bn1(x)
x = self.htanh1(x)
x = self.fc2(x)
x = self.bn2(x)
x = self.htanh2(x)
x = self.fc3(x)
x = self.bn3(x)
x = self.htanh3(x)
x = self.fc4(x)
return x
|
GHData/ninfueng_bnn-bn-free/test_mnist.py: 105-124
def test_forward(self, x):
x = x.view(-1, 28 * 28)
x = self.fc1(x)
x = x + self.bn1.get_int_bias()
x = self.htanh1(x)
x = self.fc2(x)
x = x + self.bn2.get_int_bias()
x = self.htanh2(x)
x = self.fc3(x)
# Must not use YonekawaBatchNorm1d for the last layer is the floating point.
x = self.bn3(x)
x = self.htanh3(x)
# The last layer is the floating-point weights.
x = self.fc4(x)
return x
|
GHData/vujadeyoon_TensorRT-Torch2TRT/resnet.py: 202-219
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123-InstanceNorm/network_MobileNetv2.py: 139-154
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 32 * 112 * 112
x = self.conv2(x) # out: B * 16 * 112 * 112
x = self.conv3(x) # out: B * 24 * 56 * 56
x = self.conv4(x) # out: B * 32 * 28 * 28
x = self.conv5(x) # out: B * 64 * 14 * 14
x = self.conv6(x) # out: B * 96 * 14 * 14
x = self.conv7(x) # out: B * 160 * 7 * 7
x = self.conv8(x) # out: B * 320 * 7 * 7
x = self.conv9(x) # out: B * 1280 * 7 * 7
# classifier
x = x.mean(3).mean(2) # out: B * 1280 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/589hero_torchserve-tutorial/model.py: 20-33
def forward(self, x):
x = self.conv1(x) # (B, 1, 28, 28) => (B, 32, 26, 26)
x = F.relu(x)
x = self.conv2(x) # (B, 32, 26, 26) => (B, 64, 24, 24)
x = self.max_pooling(x) # (B, 64, 12, 12)
x = self.dropout1(x)
x = torch.flatten(x, 1) # (B, 64*12*12) = (B, 9216)
x = self.fc1(x) # (B, 9216) => (B, 128)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x) # (B, 128) => (B, 10)
output = F.log_softmax(x, dim=-1) # (B, 10) => (B, 10)
return output
|
GHData/zhsaile_PyTorch_Beginner_Tutorial/14_convolutional.py: 52-71
def forward(self, x):
# first conv layer
out = self.conv1(x)
out = self.relu(out)
out = self.maxpool(out)
# second conv layer
out = self.conv2(out)
out = self.relu(out)
out = self.maxpool(out)
# full connected layer
out = out.view(-1, 16*5*5)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
return out
|
GHData/mingman514_torch_distributed_ml/test6.py: 40-64
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
# Move tensor to next device if necessary
next_device = next(self.fc1.parameters()).device
x = x.to(next_device)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
# --------- Helper Methods --------------------
# On the local node, call a method with first arg as the value held by the
# RRef. Other args are passed in as arguments to the function called.
# Useful for calling instance methods.
|
GHData/chaozhong2010_SENet-PyTorch/se_resnext.py: 116-132
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/abhinav-bhardwaj_PyTorch-MNIST-QMNIST-HandWritten-Digit-Recognition-Multi-Layer-Perceptron/digit_classifier_model.py: 28-42
def forward(self, X):
X = self.conv1(X)
X = self.relu1(X)
X = self.maxpool1(X)
X = self.conv2(X)
X = self.relu2(X)
X = self.maxpool2(X)
X = flatten(X,1)
X = self.fc1(X)
X = self.relu3(X)
X = self.fc2(X)
res = self.logSoftmax(X)
return res
|
GHData/acholston_PyTorch_Exercises/Ex11-1b.py: 60-80
def forward(self, x):
x = self.conv1a(x)
x = self.conv1b(x)
x = self.conv1c(x)
x = torch.cat([self.mp1(x), self.conv1d(x)], 1)
x1 = self.branch_3x3_1(x)
x1 = self.branch_3x3_2(x1)
x2 = self.branch_7x7_1(x)
x2 = self.branch_7x7_2(x2)
x2 = self.branch_7x7_3(x2)
x2 = self.branch_7x7_4(x2)
x = torch.cat([x1, x2], 1)
x = torch.cat([self.branch_pool(x), self.mp2(x)], 1)
return x
|
GHData/curaai_pix2pix-torch/network.py: 177-195
def forward(self, src_input, trg_input):
x = torch.cat((src_input, trg_input), 1)
out0 = self.layer0(x)
out1_0 = self.layer1_0(out0)
out1_1 = self.layer1_1(out1_0)
out2_0 = self.layer2_0(out1_1)
out2_1 = self.layer2_1(out2_0)
out3_0 = self.layer3_0(out2_1)
out3_1 = self.layer3_1(out3_0)
out3_2 = self.layer3_2(out3_1)
out4_0 = self.layer4_0(out3_2)
out4_1 = self.layer4_1(out4_0)
out4_2 = self.layer4_2(out4_1)
return out4_2
|
GHData/miraclewkf_MobileNetV2-PyTorch/MobileNetV2.py: 87-107
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.layer6(x)
x = self.layer7(x)
x = self.conv8(x)
x = self.avgpool(x)
x = self.conv9(x)
x = x.view(x.size(0),-1)
return x
|
GHData/echurch_dlpix-torch/uresnet.py: 85-108
def forward(self, x):
if self.shortcut is None:
bypass = x
else:
bypass = self.shortcut(x)
residual = self.conv1(x)
residual = self.bn1(residual)
residual = self.relu(residual)
residual = self.conv2(residual)
residual = self.bn2(residual)
residual = self.relu(residual)
residual = self.conv3(residual)
residual = self.bn3(residual)
out = bypass+residual
out = self.relu(out)
return out
|
GHData/mingman514_torch_distributed_ml/test7.py: 40-64
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
# Move tensor to next device if necessary
next_device = next(self.fc1.parameters()).device
x = x.to(next_device)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
# --------- Helper Methods --------------------
# On the local node, call a method with first arg as the value held by the
# RRef. Other args are passed in as arguments to the function called.
# Useful for calling instance methods.
|
GHData/echurch_dlpix-torch/uresnet3d.py: 85-108
def forward(self, x):
if self.shortcut is None:
bypass = x
else:
bypass = self.shortcut(x)
residual = self.conv1(x)
residual = self.bn1(residual)
residual = self.relu(residual)
residual = self.conv2(residual)
residual = self.bn2(residual)
residual = self.relu(residual)
residual = self.conv3(residual)
residual = self.bn3(residual)
out = bypass+residual
out = self.relu(out)
return out
|
GHData/JimpeiYamamoto_myTorch/CAE.py: 33-50
def forward(self, img):
x = self.conv1(img)
x = self.bn1(x)
x = F.elu(x)
x = self.pool1(x)
x = self.conv2(x)
x = self.bn2(x)
x = F.elu(x)
x = self.pool2(x)
x = self.conv3(x)
x = self.bn3(x)
x = F.elu(x)
x = x.view(-1, 20000)
x = self.dense1(x)
x = F.dropout(x, training=self.training)
x = F.elu(x)
return x
|
GHData/skrish13_CrossTransformers-PyTorch/resnet.py: 101-123
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/daveboat_torch_gan_example/model.py: 33-53
def forward(self, x):
x = self.fc1(x)
x = self.batchnorm1(x)
x = self.leakyrelu1(x)
# [N, 12544]
x = x.view((-1, 256, 7, 7))
# [N, 256, 7, 7]
x = self.conv2dtranspose2(x)
x = self.batchnorm2(x)
x = self.leakyrelu2(x)
# [N, 128, 7, 7]
x = self.conv2dtranspose3(x)
x = self.batchnorm3(x)
x = self.leakyrelu3(x)
# [N, 64, 14, 14]
x = self.conv2dtranspose4(x)
# [N, 1, 28, 28]
x = self.tanh(x)
return x
|
GHData/bo-10000_ResNet-D_PyTorch/resnetD.py: 95-119
def forward(self, x):
identity = x
# Path A
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
# Path B
if self.downsample is not None:
identity = self.downsample(x)
# Path A + Path B
out += identity
out = self.relu(out)
return out
|
GHData/xddun_test_torch_pipeline/modelx.py: 24-60
def forward(self, x):
x = self.conv1(x)
x = self.max_pool2d1(x)
x = self.relu1(x)
x = self.conv2(x)
x = self.conv2_drop(x)
x = self.max_pool2d2(x)
x = self.relu2(x)
x = x.view(-1, 320)
x = self.fc1(x)
x = self.relu3(x)
x = self.fc1_drop(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
# class Net(nn.Module):
# def __init__(self):
# super(Net, self).__init__()
# self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
# self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
# self.conv2_drop = nn.Dropout2d()
# self.fc1 = nn.Linear(320, 50)
# self.fc2 = nn.Linear(50, 10)
#
# def forward(self, x):
# x = F.relu(F.max_pool2d(self.conv1(x), 2))
# x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
# x = x.view(-1, 320)
# x = F.relu(self.fc1(x))
# x = F.dropout(x, training=self.training)
# x = self.fc2(x)
# return F.log_softmax(x)
|
GHData/vujadeyoon_TensorRT-Torch2TRT/resnet.py: 102-124
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/bo-10000_ResNet-D_PyTorch/resnetD_3d.py: 95-119
def forward(self, x):
identity = x
# Path A
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
# Path B
if self.downsample is not None:
identity = self.downsample(x)
# Path A + Path B
out += identity
out = self.relu(out)
return out
|
GHData/muratonuryildirim_PyTorch_Notes/15_ResNet_from_scratch.py: 87-102
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.reshape(x.shape[0], -1)
x = self.fc(x)
return x
|
GHData/Hui-Li_PyTorch_DDP_Demo/runMNIST.py: 25-40
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/dongwhfdyer_JVTC_ms/resnet.py: 162-226
def construct(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = out + residual
out = self.relu(out)
return out
##########nhuk####################################
# class tBottleneck(tnn.Module):
# expansion = 4
#
# def __init__(self, inplanes, planes, stride=1, downsample=None):
# super(tBottleneck, self).__init__()
# self.conv1 = tnn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
# self.bn1 = tnn.BatchNorm2d(planes)
# self.conv2 = tnn.Conv2d(planes, planes, kernel_size=3, stride=stride,
# padding=1, bias=False)
# self.bn2 = tnn.BatchNorm2d(planes)
# self.conv3 = tnn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
# self.bn3 = tnn.BatchNorm2d(planes * 4)
# self.relu = tnn.ReLU(inplace=True)
# self.downsample = downsample
# self.stride = stride
#
# ##########nhuk#################################### original one
# def forward(self, x):
# residual = x
#
# out = self.conv1(x)
# out = self.bn1(out)
# out = self.relu(out)
#
# out = self.conv2(out)
# out = self.bn2(out)
# out = self.relu(out)
#
# out = self.conv3(out)
# out = self.bn3(out)
#
# if self.downsample is not None:
# residual = self.downsample(x)
#
# out += residual
# out = self.relu(out)
#
# return out
# ##########nhuk####################################
|
GHData/alekseynp_keras-torchvision/torchvision_resnet.py: 195-215
def build(self, input_shape):
input = Input(shape=input_shape)
x = input
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = Flatten()(x)
x = self.fc(x)
return Model(input, x)
|
GHData/Windxy_Classic_Network_PyTorch/ResNet18.py: 132-149
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x,1)
x = self.fc(x)
return x
# 封装,加载预训练参数
|
GHData/alekseynp_keras-torchvision/torchvision_resnet.py: 142-164
def __call__(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out = add([out, residual])
out = self.relu3(out)
return out
|
GHData/dongwhfdyer_JVTC_ms/t_resnet.py: 25-47
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/tristandb_EfficientDet-PyTorch/retinanet.py: 130-155
def forward(self, x):
out = self.conv1(x)
out = self.act1(out)
out = self.conv2(out)
out = self.act2(out)
out = self.conv3(out)
out = self.act3(out)
out = self.conv4(out)
out = self.act4(out)
out = self.output(out)
out = self.output_act(out)
# out is B x C x W x H, with C = n_classes + n_anchors
out1 = out.permute(0, 2, 3, 1)
batch_size, width, height, channels = out1.shape
out2 = out1.view(batch_size, width, height, self.num_anchors, self.num_classes)
return out2.contiguous().view(x.shape[0], -1, self.num_classes)
|
GHData/huangtao36_PyTorch-Fully-Convolutional-ResNet-50/network_ResNet.py: 201-214
def forward(self, x):
x = self.begin1(x)
x = self.begin2(x)
x = self.begin3(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.fusion(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
|
GHData/yaoceyi_CenterNet-PyTorch/model.py: 71-93
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/miraclewkf_MobileNetV2-PyTorch/MobileNetV2.py: 21-43
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/tristandb_EfficientDet-PyTorch/utils.py: 58-79
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/huangtao36_PyTorch-Fully-Convolutional-ResNet-50/network_ResNet.py: 163-176
def forward(self, x):
x = self.begin1(x)
x = self.begin2(x)
x = self.begin3(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.fusion(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
|
GHData/SijmNoteboom_PyTorch_tutorial/Network.py: 20-48
def forward(self, t):
# (1) input layer
t = t
# (2) hidden conv layer
t = self.conv1(t)
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2)
# (3) hidden conv layer
t = self.conv2(t)
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2)
# (4) hidden linear layer
t = t.reshape(-1, 12*4*4)
t = self.fc1(t)
t = F.relu(t)
# (5) hidden linear layer
t = self.fc2(t)
t = F.relu(t)
# (6) output layer
t = self.out(t)
# t = F.softmax(t, dim=1)
return t
|
GHData/yuezuegu_torchshape/lenet.py: 42-56
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = F.relu(x)
x = self.maxpool2(x)
x = self.flatten(x)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
|
GHData/HyperGDX_BVP_torch/ae_model.py: 136-155
def forward(self, x):
y = self.conv1(x)
y = F.relu(y)
y = self.bn1(y)
y = self.conv2(y)
y = F.relu(y)
y = self.bn2(y)
# y = self.pool1(y)
y = self.flat1(y)
y = self.linear1(y)
y = F.relu(y)
# y = F.dropout(y, self.dropout_rate)
y = self.linear2(y)
_, rnn_hidden_states = self.gru(y)
final_hidden_state = rnn_hidden_states[-1, :, :]
y = self.linear3(y)
return y
|
GHData/kose_PyTorch_MNIST-tflite/test_pytorch.py: 22-39
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
##
## test main function
##
|
GHData/acholston_PyTorch_Exercises/Ex11-3.py: 127-151
def forward(self, x):
#Perform
x = self.conv1(x)
x = F.relu(self.b_norm1(x))
x = self.dense1(x)
x = self.trans1(x)
x = self.dense2(x)
x = self.trans2(x)
x = self.dense3(x)
x = self.trans3(x)
x = self.dense4(x)
x = F.avg_pool2d(x, kernel_size=7, padding=3)
x = x.view(-1, x.size(1))
x = self.fc(x)
return F.log_softmax(x)
#init
|
GHData/phanxuanphucnd_mnist_torchserve/mnist.py: 20-32
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/clovaai_CutMix-PyTorch/resnet.py: 62-82
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/zonghaofan_DRML_torch/network_omg.py: 76-110
def forward(self, x):
"""
:param x: (b, c, h, w)
:return: (b, class_number)
"""
batch_size = x.size(0)
x = self.extractor1(x)
# print(x.size())
x = self.extractor2(x)
# print(x.size())
x = self.extractor3(x)
# print(x.size())
short_cut=x
x=self.bottleneck(x)
x=self.relu(x+short_cut)
# print(x.size())
x = self.conv(x)
x = self.relu(x)
x=self.pool(x)
# print(x.size())
x = self.avgpool(x)
# print(x.size())
x = x.view(batch_size, -1)
output=self.classifier(x)
# print(output.size())
return output
|
GHData/HyperGDX_BVP_torch/ae_model.py: 98-117
def forward(self, x):
y = self.conv1(x)
y = F.relu(y)
y = self.bn1(y)
y = self.conv2(y)
y = F.relu(y)
y = self.bn2(y)
# y = self.pool1(y)
y = self.flat1(y)
y = self.linear1(y)
y = F.relu(y)
# y = F.dropout(y, self.dropout_rate)
y = self.linear2(y)
_, rnn_hidden_states = self.gru(y)
final_hidden_state = rnn_hidden_states[-1, :, :]
y = self.linear3(y)
return y
|
GHData/hisham-kottayil_torchserve_demo_v2_temp/mnist.py: 19-31
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.drop(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = self.drop2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/Chaanks_stklia/models.py: 148-166
def _forward_impl(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = x.transpose(2, 3)
x = x.flatten(1, 2)
x = pooling(x, self.pooling_mode)
x = self.fc(x)
x = self.bn2(x)
return x
|
GHData/rohitgajawada_PyTorchLIP/deeplab_resnet.py: 82-103
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/Eureka-JTX_PyTorch_DDP/model.py: 148-166
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if self.include_top:
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/eminem171333491_PyTorch-YOLO-V3-overfitting/model.py: 151-167
def forward(self, x):
h_52 = self.trunk_52(x)
h_26 = self.trunk_26(h_52)
h_13 = self.trunk_13(h_26)
convset_out_13 = self.convset_13(h_13)
detetion_out_13 = self.detetion_13(convset_out_13)
up_out_26 = self.up_26(convset_out_13)
route_out_26 = torch.cat((up_out_26, h_26), dim=1)
convset_out_26 = self.convset_26(route_out_26)
detetion_out_26 = self.detetion_26(convset_out_26)
up_out_52 = self.up_52(convset_out_26)
route_out_52 = torch.cat((up_out_52, h_52), dim=1)
convset_out_52 = self.convset_52(route_out_52)
detetion_out_52 = self.detetion_52(convset_out_52)
return detetion_out_13, detetion_out_26, detetion_out_52
#测试网络
|
GHData/NehaJSarnaik_Deep-Voice-Conversion-using-DNN/model.py: 133-149
def forward(self, input):
conv1 = self.conv1(input)
downsample1 = self.downSample1(conv1)
downsample2 = self.downSample2(downsample1)
residual_layer_1 = self.residualLayer(downsample2)
residual_layer_2 = self.residualLayer(residual_layer_1)
residual_layer_3 = self.residualLayer(residual_layer_2)
residual_layer_4 = self.residualLayer(residual_layer_3)
residual_layer_5 = self.residualLayer(residual_layer_4)
residual_layer_6 = self.residualLayer(residual_layer_5)
residual_layer_7 = self.residualLayer(residual_layer_6)
upSample_layer_1 = self.upSample1(residual_layer_7)
upSample_layer_2 = self.upSample2(upSample_layer_1)
output = self.lastConvLayer(upSample_layer_2)
return output
|
GHData/UncleThree0402_PyTorch_FFN_MNIST/train.py: 74-92
def forward(self, x):
x = self.input(x)
x = nn.LeakyReLU()(x)
x = self.bn1(x)
x = self.do1(x)
x = self.ll1(x)
x = nn.LeakyReLU()(x)
x = self.bn2(x)
x = self.do2(x)
x = self.ll2(x)
x = nn.LeakyReLU()(x)
x = self.bn3(x)
x = self.do3(x)
x = self.output(x)
return x
# Create Tool
|
GHData/kose_PyTorch_MNIST-tflite/train.py: 21-36
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/trisha025_Deep-Learning-with-PyTorch/Fashion_MNIST_Project.py: 75-105
def forward(self, t):
# (1) input layer
t=t
#(2) Conv layer
t= self.conv1(t)
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2)
# (3) Conv Layer
t = self.conv2(t)
t = F.relu(t)
t = F.max_pool2d(t, kernel_size= 2, stride=2)
# (4) Linear layer
t = t.reshape(-1, 12*4*4)
t = self.fc1(t)
t = F.relu(t)
# (5) Linear Layer
t = self.fc2(t)
t = F.relu(t)
# (6) output layer
t = self.out(t)
#t = F.softmax(t, dim=1)
return t
#torch.set_grad_enabled(False)
|
GHData/trisha025_Deep-Learning-with-PyTorch/training_CNN.py: 25-54
def forward(self, t):
# (1) Input layer
t = t
# (2) Conv layer
t = self.conv1(t)
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2)
# (3) Conv Layer
t = self.conv2(t)
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2)
# (4) Linear Layer
t = t.reshape(-1, 12*4*4)
t = self.fc1(t)
t = F.relu(t)
# (5) Linear Layer
t = self.fc2(t)
t = F.relu(t)
# (6) out layer
t = self.out(t)
#t = F.softmax(t, dim=1)
return t
#training set
|
GHData/miraclewkf_ResNeXt-PyTorch/resnext.py: 67-89
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/snowflakewang_Neural-Collaborative-Filtering-with-PyTorch/NeuMF.py: 73-90
def forward(self,user_x,item_x):
GMF_user_embedding = self.GMF_user_embedding(user_x)
GMF_item_embedding = self.GMF_item_embedding(item_x)
GMF_user_latent = self.GMF_user_latent(GMF_user_embedding)
GMF_item_latent = self.GMF_item_latent(GMF_item_embedding)
GMF_vector=torch.mul(GMF_user_latent,GMF_item_latent)
MLP_user_embedding = self.MLP_user_embedding(user_x)
MLP_item_embedding = self.MLP_item_embedding(item_x)
MLP_user_latent = self.MLP_user_latent(MLP_user_embedding)
MLP_item_latent = self.MLP_item_latent(MLP_item_embedding)
MLP_user_item_concat = torch.cat((MLP_user_latent, MLP_item_latent), dim=1) # 是按行拼接一上一下?
MLP_vector=self.mlp(MLP_user_item_concat)
NeuMF_vector=torch.cat((GMF_vector,MLP_vector),dim=1)
prediction=self.neumf(NeuMF_vector)
return prediction
|
GHData/yaoyi30_ResNet_Image_Classification_PyTorch/model.py: 61-82
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
out += identity
out = self.relu(out)
return out
|
GHData/pranjal2410_ConvNets-Keras-and-PyTorch/CNN_extend_model.py: 86-100
def call(self, inputs, training=False, mask=None):
x = self.conv1(inputs)
x = self.bN(x)
x = self.pool(x)
x = self.conv2(x)
x = self.bN(x)
x = self.pool(x)
x = self.conv3(x)
x = self.bN2(x)
x = self.pool(x)
x = self.flat(x)
x = self.dense1(x)
return self.dense2(x)
|
GHData/SMKamrulHasan_DenseNet-using-PyTorch-CIFAR10/DenseNet_mainBlock.py: 41-58
def forward(self, x):
out= self.relu(self.in_conv(x))
out= self.denseblock1(out)
out=self.transitionLayer1(out)
out= self.denseblock2(out)
out=self.transitionLayer2(out)
out= self.denseblock3(out)
out=self.transitionLayer3(out)
out=self.bn(out)
out=out.view(-1, 64*4*4)
out=self.lastlayer(out)
out=self.final(out)
return out
|
GHData/rowantseng_FUnIE-GAN-PyTorch/models.py: 127-146
def forward(self, x):
# Downsample
d1 = self.conv1(x)
d1a = self.pool(d1) # (B, 32, 128, 128)
d2 = self.conv2(d1a)
d3 = self.conv3(d2)
d3a = self.pool(d3) # (B, 64, 64, 64)
d4 = self.conv4(d3a)
d5 = self.conv5(d4)
d5a = self.pool(d5) # (B, 128, 32, 32)
d6 = self.conv6(d5a) # (B, 256, 32, 32)
# Upsample
u1 = torch.cat([self.deconv1(d6), d5], dim=1) # (B, 384, 64, 64)
u2 = torch.cat([self.deconv2(u1), d3], dim=1) # (B, 320, 128, 128)
u3 = torch.cat([self.deconv3(u2), d1], dim=1) # (B, 160, 256, 256)
return self.act(self.final(self.out2(self.out1(u3))))
|
GHData/jsesr_CSE-GResNet-PyTorch/GResNet.py: 84-106
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/mpalaourg_FashionMNIST-PyTorch/Network.py: 39-65
def forward(self, t):
""" Function to implement the forward pass of the tensors for our (custom) Network """
# (1) input layer : f(x) = x
t = t # torch.Size( [1, 1, 28, 28] )
# (2) hidden conv layer
t = self.conv1(t) # torch.Size( [1, 6, 24, 24] )
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2) # torch.Size( [1, 6, 12, 12] )
# (3) hidden conv layer
t = self.conv2(t) # torch.Size( [1, 12, 8, 8] )
t = F.relu(t)
t = F.max_pool2d(t, kernel_size=2, stride=2) # torch.Size( [1, 12, 4, 4] )
# (4) hidden linear layer
t = t.reshape(-1, 12*4*4) # torch.Size( [1, 192] )
t = self.fc1(t) # torch.Size( [1, 120] )
t = F.relu(t)
# (5) hidden linear layer
t = self.fc2(t) # torch.Size( [1, 60] )
t = F.relu(t)
# (6) output linear layer
t = self.out(t) # torch.Size( [1, 10 ] )
# t = F.softmax(t, dim=1) , cause of the cross entropy loss function being used (already compute softmax)
return t
|
GHData/pradeepsk-23_PyTorch/CIFAR10%20-%20ResNet50.py: 44-63
def forward(self, x):
identity = x.clone()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
if self.identity_downsample is not None:
identity = self.identity_downsample(identity)
x += identity
x = self.relu(x)
return x
|
GHData/SethurajS_CNN_Architectures_in_PyTorch/Resnet.py: 74-90
def forward(self, x):
x = self.conv1(x)
x = self.batchnorm(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer_1(x)
x = self.layer_2(x)
x = self.layer_3(x)
x = self.layer_4(x)
x = self.avgpool(x)
x = x.reshape(x.shape[0], -1)
x = self.fc(x)
return x
|
GHData/Michael5467_PyTorch/mnist_pytorch_examples.py: 21-35
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/pratyush-1_Resnet-pyTorch/Resnet.py: 95-110
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/xiaywang_q-eegnet_torch/eegnet_quant.py: 81-108
def forward(self, x):
x = x.reshape(x.shape[0], 1, x.shape[1], x.shape[2])
x = self.quant(x)
# Block 1
# x = self.conv1_pad(x)
x = self.conv1_bn(x)
x = self.conv2_bn_relu(x)
x = self.pool1(x)
x = self.dropout1(x)
# Block2
# x = self.sep_conv_pad(x)
x = self.sep_conv1(x)
x = self.sep_conv2_bn_relu(x)
x = self.pool2(x)
x = self.dropout2(x)
# Classification
x = self.flatten(x)
x = self.fc(x)
x = self.dequant(x)
return x
|
GHData/jingxu10_PyTorch_ITT_Examples/cifar_test.py: 67-83
def forward(self, x):
x = self.conv0(x)
x = self.conv1(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2, 2)
x = self.conv3(x)
x = self.conv4(x)
x = F.max_pool2d(x, 2, 2)
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.log_softmax(x, dim=1)
return x
|
GHData/jxm6165_pod-torch-test/mnist_classify.py: 24-39
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/skrish13_CrossTransformers-PyTorch/resnet.py: 56-74
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/Nebula4869_PyTorch-gender-age-estimation/model.py: 22-40
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.down_sample is not None:
residual = self.down_sample(x)
out += residual
out = self.relu(out)
return out
|
GHData/JimpeiYamamoto_myTorch/MCDropout.py: 100-124
def forward(self, x):
identity = x
out = self.conv1(x)
#mc
out = self.mcdrop1(out)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
#mc
out = self.mcdrop3(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
#mc
out = self.mcdrop3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/bo-10000_ResNet-D_PyTorch/resnetD.py: 57-75
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/bo-10000_ResNet-D_PyTorch/resnetD_3d.py: 57-75
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/vujadeyoon_TensorRT-Torch2TRT/resnet.py: 57-75
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/pratyush-1_Resnet-pyTorch/Resnet.py: 22-39
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/echurch_dlpix-torch/uresnet3d.py: 42-60
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/echurch_dlpix-torch/uresnet.py: 42-60
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/yaoceyi_CenterNet-PyTorch/model.py: 35-53
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/tristandb_EfficientDet-PyTorch/utils.py: 23-41
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/leichenNUSJ_AAMandDCM/networks_adaCBMA_deform.py: 70-91
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.ca(out) * out
out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/ZengyuanYu_PyTorch_60minutes/ResNet.py: 56-70
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# ResNet
|
GHData/langmanbusi_KinD_PyTorch/model_KinD_color.py: 306-323
def forward(self, x):
residual = x
out = self.conv1(x)
if self.norm in ['BN', 'IN']:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.norm in ['BN', 'IN']:
out = self.bn2(out)
if self.downsample:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/sunlanchang_YOLOv1-PyTorch/resnet_yolo.py: 40-58
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/rohitgajawada_PyTorchLIP/deeplab_resnet.py: 34-52
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/clovaai_CutMix-PyTorch/resnet.py: 26-44
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/Windxy_Classic_Network_PyTorch/ResNet18.py: 48-67
def forward(self, x):
identity = x # 把输入x保存到identity
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
# 在网络的有的地方的尺寸已经发生了变化,故需要下采样,保证同步
if self.downsample is not None:
identity = self.downsample(identity)
out = x + identity # 将残差和原始的输入相加
out = self.relu(out) # 在融合之后才调用激活函数
return out
# 定义ResNet网络的结构
|
GHData/Windxy_Classic_Network_PyTorch/ResNet34.py: 40-58
def forward(self, x):
identity = x # 把输入x保存到identity
if self.downsample is not None: # 如果旁路是虚线结构,则需要进行下采样操作,保证尺寸统一,此时downsample不为None
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += identity # 将残差和原始的输入相加
out = self.relu(out) # 在融合之后才调用激活函数
return out
# 定义ResNet网络的结构
|
GHData/miraclewkf_ResNeXt-PyTorch/resnext.py: 32-50
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/rasbt_ord-torchhub/hubconf.py: 85-101
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.output_layer(x) + self.output_biases
return logits
|
GHData/yaoyi30_ResNet_Image_Classification_PyTorch/model.py: 19-36
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += identity
out = self.relu(out)
return out
|
GHData/rasbt_ord-torchhub/hubconf.py: 135-151
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.output_layer(x)
return logits
|
GHData/alekseynp_keras-torchvision/torchvision_resnet.py: 107-125
def __call__(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out = add([out, residual])
out = self.relu2(out)
return out
|
GHData/soumickmj_TorchEsegeta/model_copy.py: 52-71
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = nn.ReLU()(out)
out = self.conv2(out)
out = self.bn2(out)
out = nn.ReLU()(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = nn.ReLU()(out)
return out
|
GHData/Chaiyanchong_CutMix-PyTorch-master/resnet.py: 26-44
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/chaozhong2010_SENet-PyTorch/resnext.py: 32-50
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/dyhan0920_PyramidNet-PyTorch/preresnet.py: 70-95
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
if self.downsample is not None:
if self.preact == 'preact':
residual = self.downsample(out)
else:
residual = self.downsample(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
out += residual
return out
|
GHData/ilex-paraguariensis_torch_modules/resnet3d.py: 40-60
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
# out = self.dr1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
# out = self.dr2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/haotian-liu_torch-localization/Resnet.py: 26-43
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.bn1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/zwx8981_DBCNN-PyTorch/SCNN3.py: 94-113
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/YimingZzz_PyTorch_PG2/resnet.py: 61-75
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# ResNet Module
|
GHData/TalentBoy2333_RetinaNet-PyTorch-Tutorial/resnet.py: 44-62
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/Chaanks_stklia/models.py: 53-70
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/Xingxiangrui_various_pyTorch_network_structure/group_clsgat_parallel.py: 164-204
def forward(self, x):
# input x=residual[batch, group_channels=512, W,H]
# residual = torch.Tensor(x.size(0),0,x.size(2),x.size(3))
# residual [batch, groups*group_channels=6144, W,H]
for group_idx in range(self.groups):
if (group_idx==0):
residual=x
else:
residual=torch.cat((residual,x),dim=1)
# residual=torch.cat((x,x,x,x,x,x,x,x,x,x,x,x), dim=1)
# squeeze and to gropus=12 [batch, group_channels//2*groups=512//2*12=3072,W,H]
out = self.conv1(x)
# same as above [batch, group_channels//2*groups=512//2*12=3072,W,H]
out = self.bn1(out)
# same as above [batch, group_channels//2*groups=512//2*12=3072,W,H]
out = self.relu(out)
# same as above [batch, group_channels//2*groups=512//2*12=3072,W,H]
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
# expand [batch , groups_channels*groups=6144, W,H ]
out = self.conv3(out)
out = self.bn3(out)
# out = self.ca(out) * out
# out = self.sa(out) * out
if self.downsample is not None:
residual = self.downsample(x)
# residual [batch, groups*group_channels=6144, W,H]
out += residual
out = self.relu(out)
return out
# fixme torch parallen group linear, from groups fc to classes
|
GHData/pradeepsk-23_PyTorch/CIFAR10%20-%20ResNet.py: 47-60
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# ResNet
|
GHData/naveen-hyperworks_Convolutional-Neural-Fabrics-PyTorch-Wrapper/neural_fabrics.py: 185-217
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
residual = x
if self.downsample is not None:
residual = self.downsample(residual)
if self.gating == 'total_dropout':
if np.random.randint(self.op_count) > 0: ### probability of remaining active is 1/op_count
self.val = 0
if self.gating is not None:
residual = torch.mul(residual, self.val)
out += residual
out = self.relu(out)
return out
|
GHData/muratonuryildirim_PyTorch_Notes/15_ResNet_from_scratch.py: 17-33
def forward(self, x):
residual = x.clone()
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
if self.identity_downsample is not None:
residual = self.identity_downsample(residual)
x += residual
x = self.relu(x)
return x
|
GHData/dyhan0920_PyramidNet-PyTorch/preresnet.py: 29-51
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.relu(out)
if self.downsample is not None:
if self.preact == 'preact':
residual = self.downsample(out)
else:
residual = self.downsample(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out += residual
return out
|
GHData/dyhan0920_PyramidNet-PyTorch/resnet.py: 27-45
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/foamliu_InsightFace-PyTorch/models.py: 42-60
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/Knight-Antonio_CosFace-PyTorch/net.py: 96-113
def forward(self, x):
residual = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu1(out)
out = self.conv2(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
return out
|
GHData/tqlong_torchtut/tut2_model.py: 26-39
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# ResNet
|
GHData/DGenady_gw_torch/resnetNoBN.py: 41-60
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/JimpeiYamamoto_myTorch/MCDropout.py: 56-69
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/JimpeiYamamoto_myTorch/myResNet50.py: 36-49
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/rasbt_ord-torchhub/hubconf.py: 186-202
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = self.output_layer(x)
return logits
|
GHData/pirunita_SiamMask_OPN_PyTorch/resnet.py: 40-58
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
|
GHData/jsesr_CSE-GResNet-PyTorch/GResNet.py: 45-63
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
|
GHData/skmhrk1209_ResNet-PyTorch/model.py: 74-91
def forward(self, inputs):
shortcut = inputs
inputs = self.norm1(inputs)
inputs = self.act1(inputs)
if self.projection:
shortcut = self.projection(inputs)
inputs = self.conv1(inputs)
inputs = self.norm2(inputs)
inputs = self.act2(inputs)
inputs = self.conv2(inputs)
inputs += shortcut
return inputs
|
GHData/Eureka-JTX_PyTorch_DDP/model.py: 21-39
def forward(self, x):
identity = x
if self.downsample is not None:
identity = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += identity
out = self.relu(out)
return out
# 高层残差结构
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123-InstanceNorm/network_MobileNetv1.py: 80-100
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 32 * 112 * 112
x = self.conv2(x) # out: B * 64 * 112 * 112
x = self.conv3(x) # out: B * 128 * 56 * 56
x = self.conv4(x) # out: B * 128 * 56 * 56
x = self.conv5(x) # out: B * 256 * 28 * 28
x = self.conv6(x) # out: B * 256 * 28 * 28
x = self.conv7(x) # out: B * 512 * 14 * 14
x = self.conv8(x) # out: B * 512 * 14 * 14
x = self.conv9(x) # out: B * 512 * 14 * 14
x = self.conv10(x) # out: B * 512 * 14 * 14
x = self.conv11(x) # out: B * 512 * 14 * 14
x = self.conv12(x) # out: B * 512 * 14 * 14
x = self.conv13(x) # out: B * 1024 * 7 * 7
x = self.conv14(x) # out: B * 1024 * 7 * 7
# classifier
x = x.mean(3).mean(2) # out: B * 1024 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/chuanli11_WCT-PyTorch/ae.py: 243-261
def forward(self,x):
out = self.reflecPad7(x)
out = self.conv7(out)
out = self.relu7(out)
out = self.unpool(out)
out = self.reflecPad8(out)
out = self.conv8(out)
out = self.relu8(out)
out = self.reflecPad9(out)
out = self.conv9(out)
out = self.relu9(out)
out = self.unpool2(out)
out = self.reflecPad10(out)
out = self.conv10(out)
out = self.relu10(out)
out = self.reflecPad11(out)
out = self.conv11(out)
return out
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123-InstanceNorm/network_MobileNetv3.py: 280-300
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 16 * 112 * 112
x = self.conv2(x) # out: B * 16 * 56 * 56
x = self.conv3(x) # out: B * 24 * 28 * 28
x = self.conv4(x) # out: B * 24 * 28 * 28
x = self.conv5(x) # out: B * 40 * 14 * 14
x = self.conv6(x) # out: B * 40 * 14 * 14
x = self.conv7(x) # out: B * 40 * 14 * 14
x = self.conv8(x) # out: B * 48 * 14 * 14
x = self.conv9(x) # out: B * 48 * 14 * 14
x = self.conv10(x) # out: B * 96 * 7 * 7
x = self.conv11(x) # out: B * 96 * 7 * 7
x = self.conv12(x) # out: B * 96 * 7 * 7
x = self.conv13(x) # out: B * 576 * 7 * 7
x = self.conv14(x) # out: B * 1280 * 1 * 1
# classifier
x = x.mean(3).mean(2) # out: B * 1280 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/hieubkvn123_FaceRecognitionPyTorch/triplet_net.py: 38-61
def forward(self, inputs):
output = self.resnet_model(inputs)
output = self.dense1(output)
output = self.dense1_norm(output)
output = self.dense1_relu(output)
output = self.dense2(output)
output = self.dense2_norm(output)
output = self.dense2_relu(output)
output = self.dense3(output)
output = self.dense3_norm(output)
output = self.dense3_relu(output)
output = self.dense4(output)
output = self.dense4_norm(output)
output = self.dense4_relu(output)
output = self.dense5(output)
output = self.dense5_norm(output)
output = self.dense5_relu(output)
return output
|
GHData/Kodamayuto2001_PyTorchSimpleCNN/useModel5.py: 22-42
def forward(self, x):
x = self.conv1(x)
#print(x.size())
x = torch.relu(x)
x = self.bn1(x)
x = self.pool(x)
#print(x.size())
x = self.conv2(x)
#print(x.size())
x = torch.relu(x)
x = self.bn2(x)
x = self.pool(x)
#print(x.size())
x = x.view(-1,16*5*5)
x = self.fc1(x)
x = torch.relu(x)
x = self.fc2(x)
x = torch.relu(x)
x = self.fc3(x)
return F.log_softmax(x, dim=1)
|
GHData/hieubkvn123_FaceRecognitionPyTorch/train.py: 65-89
def forward(self, inputs):
output = self.resnet_model(inputs)
output = self.dense1(output)
output = self.dense1_norm(output)
output = self.dense1_relu(output)
output = self.dense2(output)
output = self.dense2_norm(output)
output = self.dense2_relu(output)
output = self.dense3(output)
output = self.dense3_norm(output)
output = self.dense3_relu(output)
output = self.dense4(output)
output = self.dense4_norm(output)
output = self.dense4_relu(output)
output = self.dense5(output)
output = self.dense5_norm(output)
output = self.dense5_relu(output)
return output
|
GHData/Kodamayuto2001_PyTorchSimpleCNN/cnn.py: 185-204
def forward(self, x):
x = self.conv1(x)
#print(x.size())
x = torch.relu(x)
x = self.bn1(x)
x = self.pool(x)
#print(x.size())
x = self.conv2(x)
#print(x.size())
x = torch.relu(x)
x = self.bn2(x)
x = self.pool(x)
#print(x.size())
x = x.view(-1,16*5*5)
x = self.fc1(x)
x = torch.relu(x)
x = self.fc2(x)
x = torch.relu(x)
x = self.fc3(x)
return F.log_softmax(x, dim=1)
|
GHData/ralcant_reconet_torch_copy/network.py: 94-116
def forward(self, x):
x = self.cir1(x)
x = self.cir2(x)
x = self.cir3(x)
x = self.rir1(x)
x = self.rir2(x)
x = self.rir3(x)
x = self.rir4(x)
x = self.rir5(x)
feat = x
x = self.up1(x)
x = self.devcir1(x)
x = self.up2(x)
x = self.devcir2(x)
x = self.tanh(x)
# x = self.deconv(x)
return feat, x
|
GHData/kawori_Noise2Noise_PyTorch/unet.py: 42-76
def forward(self, x):
# print("input: ", x.shape)
pool1 = self.block1(x)
# print("pool1: ", pool1.shape)
pool2 = self.block2(pool1)
# print("pool2: ", pool2.shape)
pool3 = self.block2(pool2)
# print("pool3: ", pool3.shape)
pool4 = self.block2(pool3)
# print("pool4: ", pool4.shape)
pool5 = self.block2(pool4)
# print("pool5: ", pool5.shape)
upsample5 = self.block3(pool5)
# print("upsample5: ", upsample5.shape)
concat5 = torch.cat((upsample5, pool4), 1)
# print("concat5: ", concat5.shape)
upsample4 = self.block4(concat5)
# print("upsample4: ", upsample4.shape)
concat4 = torch.cat((upsample4, pool3), 1)
# print("concat4: ", concat4.shape)
upsample3 = self.block5(concat4)
# print("upsample3: ", upsample3.shape)
concat3 = torch.cat((upsample3, pool2), 1)
# print("concat3: ", concat3.shape)
upsample2 = self.block5(concat3)
# print("upsample2: ", upsample2.shape)
concat2 = torch.cat((upsample2, pool1), 1)
# print("concat2: ", concat2.shape)
upsample1 = self.block5(concat2)
# print("upsample1: ", upsample1.shape)
concat1 = torch.cat((upsample1, x), 1)
# print("concat1: ", concat1.shape)
output = self.block6(concat1)
# print("output: ", output.shape)
return output
|
GHData/YimingZzz_PyTorch_PG2/discriminator.py: 35-70
def forward(self, condition, real_fake):
x = torch.cat([condition, real_fake], dim = 0)
x = self.conv1(x)
x = self.batchnorm1(x)
#print (x.size())
x = self.conv2(x)
x = self.batchnorm2(x)
#print (x.size())
x = self.conv3(x)
x = self.batchnorm3(x)
#print (x.size())
x = self.conv4(x)
x = self.batchnorm4(x)
#print (x.size())
x = self.conv5(x)
x = self.batchnorm5(x)
#print (x.size())
x = torch.reshape(x, [-1, 8 * 8 * 8 * self.out_channel])
#print (x.size())
x = self.fc(x)
#print (x.size())
out = torch.reshape(x, [-1])
return out
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123/network_MobileNetv3.py: 280-300
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 16 * 112 * 112
x = self.conv2(x) # out: B * 16 * 56 * 56
x = self.conv3(x) # out: B * 24 * 28 * 28
x = self.conv4(x) # out: B * 24 * 28 * 28
x = self.conv5(x) # out: B * 40 * 14 * 14
x = self.conv6(x) # out: B * 40 * 14 * 14
x = self.conv7(x) # out: B * 40 * 14 * 14
x = self.conv8(x) # out: B * 48 * 14 * 14
x = self.conv9(x) # out: B * 48 * 14 * 14
x = self.conv10(x) # out: B * 96 * 7 * 7
x = self.conv11(x) # out: B * 96 * 7 * 7
x = self.conv12(x) # out: B * 96 * 7 * 7
x = self.conv13(x) # out: B * 576 * 7 * 7
x = self.conv14(x) # out: B * 1280 * 1 * 1
# classifier
x = x.mean(3).mean(2) # out: B * 1280 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/TalentBoy2333_RetinaNet-PyTorch-Tutorial/fpn.py: 59-79
def forward(self, C3, C4, C5):
P5 = self.P5_conv1(C5)
Up5 = self.P5_up(P5)
P5 = self.P5_conv2(P5)
P4 = self.P4_conv1(C4)
P4 = P4 + Up5
Up4 = self.P4_up(P4)
P4 = self.P4_conv2(P4)
P3 = self.P3_conv1(C3)
P3 = P3 + Up4
P3 = self.P3_conv2(P3)
P6 = self.P6_conv(C5)
P7 = self.P7_relu(P6)
P7 = self.P7_conv(P7)
return P3, P4, P5, P6, P7
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123/network_MobileNetv1.py: 80-100
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 32 * 112 * 112
x = self.conv2(x) # out: B * 64 * 112 * 112
x = self.conv3(x) # out: B * 128 * 56 * 56
x = self.conv4(x) # out: B * 128 * 56 * 56
x = self.conv5(x) # out: B * 256 * 28 * 28
x = self.conv6(x) # out: B * 256 * 28 * 28
x = self.conv7(x) # out: B * 512 * 14 * 14
x = self.conv8(x) # out: B * 512 * 14 * 14
x = self.conv9(x) # out: B * 512 * 14 * 14
x = self.conv10(x) # out: B * 512 * 14 * 14
x = self.conv11(x) # out: B * 512 * 14 * 14
x = self.conv12(x) # out: B * 512 * 14 * 14
x = self.conv13(x) # out: B * 1024 * 7 * 7
x = self.conv14(x) # out: B * 1024 * 7 * 7
# classifier
x = x.mean(3).mean(2) # out: B * 1024 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/suraj-maniyar_VQA-PyTorch/model.py: 67-95
def forward(self, x1, x2):
image_feature = self.fc_image(x1)
question_feature = self.question_model(x2)
image_feature = torch.tanh(image_feature)
image_feature = self.bn1(image_feature)
question_feature = self.bn2(question_feature)
concat = torch.mul(image_feature, question_feature)
concat = self.dropout(concat)
out = self.fc_combined_1(concat)
out = torch.tanh(out)
out = self.dropout_1(out)
out = self.fc_combined_2(out)
out = torch.tanh(out)
out = self.dropout_2(out)
out = self.fc_combined_final(out)
return out
|
GHData/liulai_reconet-torch/network.py: 94-116
def forward(self, x):
x = self.cir1(x)
x = self.cir2(x)
x = self.cir3(x)
x = self.rir1(x)
x = self.rir2(x)
x = self.rir3(x)
x = self.rir4(x)
x = self.rir5(x)
feat = x
x = self.up1(x)
x = self.devcir1(x)
x = self.up2(x)
x = self.devcir2(x)
x = self.tanh(x)
# x = self.deconv(x)
return feat, x
|
GHData/dongwhfdyer_JVTC_ms/resnet.py: 283-308
def construct(self, x):
# kuhn edited. The data type other than cell or Primitive is not allowed in Cell.construct.
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool2d(x)
x = x.view(x.shape[0], -1)
x = self.feat(x)
fea = self.feat_bn(x)
fea_norm = P.L2Normalize(axis=1, epsilon=1e-12)(fea) # kuhn: important normalize
x = P.ReLU()(fea)
x = self.classifier(x)
return x, fea_norm, fea
|
GHData/tristandb_EfficientDet-PyTorch/retinanet.py: 44-68
def forward(self, inputs):
C3, C4, C5 = inputs
P5_x = self.P5_1(C5)
P5_upsampled_x = self.P5_upsampled(P5_x)
P5_x = self.P5_2(P5_x)
P4_x = self.P4_1(C4)
P4_x = P5_upsampled_x + P4_x
P4_upsampled_x = self.P4_upsampled(P4_x)
P4_x = self.P4_2(P4_x)
P3_x = self.P3_1(C3)
P3_x = P3_x + P4_upsampled_x
P3_x = self.P3_2(P3_x)
P6_x = self.P6(C5)
P7_x = self.P7_1(P6_x)
P7_x = self.P7_2(P7_x)
return [P3_x, P4_x, P5_x, P6_x, P7_x]
|
GHData/maingoc1605_torch/learn_torch.py: 84-102
def forward(self,x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = self.layer5(out)
out = self.layer6(out)
out = self.layer7(out)
out = self.layer8(out)
out = self.layer9(out)
out = self.layer10(out)
out = self.layer11(out)
out = self.layer12(out)
out = self.layer13(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
out = self.fc1(out)
out = self.fc2(out)
return out
|
GHData/makifozkanoglu_MultiResUNet-PyTorch/multiresunet.py: 44-60
def forward(self, inp):
if self.use_dropout:
x = self.dropout(inp)
else:
x = inp
shortcut = self.conv2d_bn(x)
conv3x3 = self.conv3x3(x)
conv5x5 = self.conv5x5(conv3x3)
conv7x7 = self.conv7x7(conv5x5)
out = torch.cat([conv3x3, conv5x5, conv7x7], dim=1)
out = self.bn_1(out)
out = torch.add(shortcut, out)
out = self.relu(out)
out = self.bn_2(out)
return out
|
GHData/smj007_RASNet-PyTorch/model.py: 113-137
def forward(self, x):
# Encoder
x = self.maxpool(self.relu(self.bn(self.conv(x))))
enc1 = self.encoder1(x)
enc2 = self.encoder2(enc1)
enc3 = self.encoder3(enc2)
enc4 = self.encoder4(enc3)
# Decoder
dec4 = self.decoder4(enc4)
a3 = self.af3(dec4, enc3)
dec3 = self.decoder3(a3)
a2 = self.af2(dec3, enc2)
dec2 = self.decoder2(a2)
a1 = self.af1(dec2, enc1)
dec1 = self.decoder1(a1)
# Classifier
logits = self.relu_last(self.deconv_last(dec1))
logits = self.relu_last(self.conv_last1(logits))
logits = self.conv_last2(logits)
# Log-Softmax
out = F.log_softmax(logits, dim=1)
return out
|
GHData/DebeshJha_ResUNetplusplus-PyTorch-/resunet%2B%2B.py: 174-191
def forward(self, inputs):
c1 = self.c1(inputs)
c2 = self.c2(c1)
c3 = self.c3(c2)
c4 = self.c4(c3)
b1 = self.b1(c4)
d1 = self.d1(c3, b1)
d2 = self.d2(c2, d1)
d3 = self.d3(c1, d2)
output = self.aspp(d3)
output = self.output(output)
return output
|
GHData/tejash21_Aerial-Image-Segmentation-with-PyTorch/model.py: 105-118
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
logits = self.outc(x)
return logits
|
GHData/takmin_PyTorch2OpenCV_sample/save_LeNet_ONNX.py: 19-31
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/weibifan_myPyTorchEx/ex14_LeNet_MNIST.py: 51-64
def forward(self, x):
in_size = x.size(0) # 在本例中in_size=512,也就是BATCH_SIZE的值。输入的x可以看成是512*1*28*28的张量。
out = self.conv1(x) # batch*1*28*28 -> batch*10*24*24(28x28的图像经过一次核为5x5的卷积,输出变为24x24)
out = F.relu(out) # batch*10*24*24(激活函数ReLU不改变形状))
out = F.max_pool2d(out, 2, 2) # batch*10*24*24 -> batch*10*12*12(2*2的池化层会减半)
out = self.conv2(out) # batch*10*12*12 -> batch*20*10*10(再卷积一次,核的大小是3)
out = F.relu(out) # batch*20*10*10
out = out.view(in_size, -1) # batch*20*10*10 -> batch*2000(out的第二维是-1,说明是自动推算,本例中第二维是20*10*10)
out = self.fc1(out) # batch*2000 -> batch*500
out = F.relu(out) # batch*500
out = self.fc2(out) # batch*500 -> batch*10
out = F.log_softmax(out, dim=1) # 计算log(softmax(x))
return out
|
GHData/takmin_PyTorch2OpenCV_sample/train_LeNet.py: 21-34
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, 2)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
|
GHData/KevinChen1994_torchExample/mnist.py: 54-77
def forward(self, x):
in_size = x.size(0) # 第一个维度的值就是batch_size
'''
使用0填充
output_height=[input_height/stride_height]
output_width=[input_width/stride_width]
不使用0填充
output_height=[(input_height-filter_height+1)/stride_height]
output_width=[(input_width-filter_width+1)/stride_width]
'''
out = self.conv1(x) # [batch_size, 1, 28, 28] -> [batch_size, 10, 24, 24] 默认是不使用0填充
out = F.relu(out)
out = F.max_pool2d(out, 2, 2) # [batch_size, 10, 24, 24] -> [batch_size, 10, 12, 12]
out = self.conv2(out) # [batch_size, 10, 12, 12] -> [batch_size, 20, 10, 10]
out = F.relu(out)
out = out.view(in_size, -1) # [batch_size, 20, 10, 10] -> [batch_size, 20*10*10]
out = self.fc1(out) # [batch_size, 200] -> [batch_size, 500]
out = F.relu(out)
out = self.fc2(out) # [batch_size, 500] -> [batch_size, 10]
out = F.log_softmax(out, dim=1) # 计算log(softmax(x))
return out
# model = LogisticsRegression().to(device=DEVICE)
|
GHData/acholston_PyTorch_Exercises/Ex11-1b.py: 96-111
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, padding=1, stride=1)
branch_pool = self.branch_pool(branch_pool)
outputs = torch.cat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 1)
return outputs
|
GHData/ArgoHA_torch_train_cnn/torch_train_cnn.py: 122-147
def forward(self, x):
x = self.conv1(x)
x = self.pool1(x)
x = self.batch_norm1(x)
x = self.activ1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.batch_norm2(x)
x = self.activ2(x)
x = self.conv3(x)
x = self.pool3(x)
x = self.batch_norm3(x)
x = self.activ3(x)
x = self.glob_pool(x).reshape(-1, 128)
x = self.fc1(x)
x = self.activ4(x)
x = self.fc2(x)
x = self.sm(x)
return x
|
GHData/as791_ZOO_Attack_PyTorch/setup_cifar10_model.py: 23-42
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.conv3(x)
x = F.relu(x)
x = self.conv4(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x,1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout1(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
|
GHData/as791_ZOO_Attack_PyTorch/setup_mnist_model.py: 22-41
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = self.conv3(x)
x = F.relu(x)
x = self.conv4(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = torch.flatten(x,1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout1(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
|
GHData/myay_SPICE-Torch/run_fashion_binarized_fi.py: 89-116
def forward(self, x):
#print(self)
x = self.conv1(x)
x = F.max_pool2d(x, 2)
x = self.bn1(x)
x = self.htanh(x)
# x = self.relu(x)
x = self.qact1(x)
x = self.conv2(x)
x = F.max_pool2d(x, 2)
x = self.bn2(x)
x = self.htanh(x)
# x = self.relu(x)
x = self.qact2(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = self.bn3(x)
x = self.htanh(x)
# x = self.relu(x)
x = self.qact3(x)
x = self.fc2(x)
x = self.scale(x)
# output = F.log_softmax(x, dim=1)
return x
|
GHData/Ravitha_pyTorch_Examples/Unet.py: 155-182
def forward(self, x):
conv1 = self.dconv_down1(x)
x = self.maxpool(conv1)
conv2 = self.dconv_down2(x)
x = self.maxpool(conv2)
conv3 = self.dconv_down3(x)
x = self.maxpool(conv3)
x = self.dconv_down4(x)
x = self.upsample(x)
x = torch.cat([x, conv3], dim=1)
x = self.dconv_up3(x)
x = self.upsample(x)
x = torch.cat([x, conv2], dim=1)
x = self.dconv_up2(x)
x = self.upsample(x)
x = torch.cat([x, conv1], dim=1)
x = self.dconv_up1(x)
out = self.conv_last(x)
return out
|
GHData/RayTeen_ACGAN.PyTorch/models.py: 78-102
def forward(self, input):
x = self.conv1(input)
x = self.lrelu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.lrelu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.lrelu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.lrelu(x)
x = self.conv5(x)
x = x.view(-1, self.ndf * 1)
c = self.aux_linear(x)
s = self.gan_linear(x)
s = self.sigmoid(s)
return s.squeeze(1), c.squeeze(1)
|
GHData/chuanli11_WCT-PyTorch/ae.py: 181-201
def forward(self,x):
out = self.conv1(x)
out = self.reflecPad1(out)
out = self.conv2(out)
out = self.relu2(out)
out = self.reflecPad3(out)
out = self.conv3(out)
pool1 = self.relu3(out)
out,pool_idx = self.maxPool(pool1)
out = self.reflecPad4(out)
out = self.conv4(out)
out = self.relu4(out)
out = self.reflecPad5(out)
out = self.conv5(out)
pool2 = self.relu5(out)
out,pool_idx2 = self.maxPool2(pool2)
out = self.reflecPad6(out)
out = self.conv6(out)
out = self.relu6(out)
return out
|
GHData/xiaywang_q-eegnet_torch/eegnet.py: 95-126
def forward(self, x):
# reshape vector from (s, C, T) to (s, 1, C, T)
x = x.reshape(x.shape[0], 1, x.shape[1], x.shape[2])
# input dimensions: (s, 1, C, T)
# Block 1
x = self.conv1_pad(x)
x = self.conv1(x) # output dim: (s, F1, C, T-1)
x = self.batch_norm1(x)
x = self.conv2(x) # output dim: (s, D * F1, 1, T-1)
x = self.batch_norm2(x)
x = self.activation1(x)
x = self.pool1(x) # output dim: (s, D * F1, 1, T // 8)
x = self.dropout1(x)
# Block2
x = self.sep_conv_pad(x)
x = self.sep_conv1(x) # output dim: (s, D * F1, 1, T // 8 - 1)
x = self.sep_conv2(x) # output dim: (s, F2, 1, T // 8 - 1)
x = self.batch_norm3(x)
x = self.activation2(x)
x = self.pool2(x) # output dim: (s, F2, 1, T // 64)
x = self.dropout2(x)
# Classification
x = self.flatten(x) # output dim: (s, F2 * (T // 64))
x = self.fc(x) # output dim: (s, N)
return x
|
GHData/ChenEating716_PointNet_PyTorch/model.py: 27-56
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
# x = x.view(-1, 1024)
x = torch.max(x, 2, keepdim=True)[0].squeeze()
x = self.fc1(x)
x = self.bn4(x)
x = self.relu(x)
#
x = self.fc2(x)
x = self.bn5(x)
x = self.relu(x)
x = self.fc3(x)
x = F.log_softmax(x, dim=1)
return x
|
GHData/machiredd_EM_segmentation_PyTorch/model_resunet.py: 76-104
def forward(self, x):
# Encode
x1 = self.input_layer(x) + self.input_skip(x)
x2 = self.residual_conv_1(x1)
x3 = self.residual_conv_2(x2)
x4 = self.residual_conv_3(x3)
# Bridge
x5 = self.bridge(x4)
# Decode
x6 = self.upsample_1(x5)
x7 = torch.cat([x6, x4], dim=1)
x8 = self.up_residual_conv1(x7)
x9 = self.upsample_2(x8)
x10 = torch.cat([x9, x3], dim=1)
x11 = self.up_residual_conv2(x10)
x12 = self.upsample_3(x11)
x13 = torch.cat([x12, x2], dim=1)
x14 = self.up_residual_conv3(x13)
x15 = self.upsample_4(x14)
x16 = torch.cat([x15, x1], dim=1)
x17 = self.up_residual_conv4(x16)
output = self.output_layer(x17)
return output
|
GHData/Gerryflap_Torch_MNIST_GAN/mnist_wgangp.py: 98-125
def forward(self, inp):
x = self.conv_1(inp)
x = self.activ(x)
x = self.conv_2(x)
if self.use_bn:
x = self.bn_2(x)
x = self.activ(x)
x = self.conv_3(x)
if self.use_bn:
x = self.bn_3(x)
x = self.activ(x)
x = self.conv_4(x)
if self.use_bn:
x = self.bn_4(x)
x = self.activ(x)
# Flatten to vector
x = x.view(-1, self.h_size * 4)
x = self.lin_1(x)
x = self.activ(x)
x = self.lin_2(x)
return x
|
GHData/igeng_Swaggy_pytorch/resnet.py: 163-199
def forward(self, x):
"""
:param x:
:return:
"""
# print(x.shape)
x = x.transpose(1, 3).transpose(2, 3)
# print(x.shape)
x = F.relu(self.conv1(x))
# [b, 64, h, w] => [b, 1024, h, w]
x = self.blk1_1(x)
x = self.blk1_2(x)
x = self.blk1_3(x)
x = self.blk2_1(x)
x = self.blk2_2(x)
x = self.blk2_3(x)
x = self.blk3_1(x)
x = self.blk3_2(x)
x = self.blk3_3(x)
x = self.blk4_1(x)
x = self.blk4_2(x)
x = self.blk4_3(x)
# print('after conv:', x.shape) #[b, 512, 2, 2]
# [b, 512, h, w] => [b, 512, 1, 1]
x = F.adaptive_avg_pool2d(x, [1, 1])
print('after pool:', x.shape)
x = x.view(x.size(0), -1)
x = self.outlayer(x)
return x
|
GHData/SunnerLi_SVS-UNet-PyTorch/model.py: 172-198
def forward(self, mix):
"""
Generate the mask for the given mixture audio spectrogram
Arg: mix (torch.Tensor) - The mixture spectrogram which size is (B, 1, 512, 128)
Ret: The soft mask which size is (B, 1, 512, 128)
"""
conv1_out = self.conv1(mix)
conv2_out = self.conv2(conv1_out)
conv3_out = self.conv3(conv2_out)
conv4_out = self.conv4(conv3_out)
conv5_out = self.conv5(conv4_out)
conv6_out = self.conv6(conv5_out)
deconv1_out = self.deconv1(conv6_out, output_size = conv5_out.size())
deconv1_out = self.deconv1_BAD(deconv1_out)
deconv2_out = self.deconv2(torch.cat([deconv1_out, conv5_out], 1), output_size = conv4_out.size())
deconv2_out = self.deconv2_BAD(deconv2_out)
deconv3_out = self.deconv3(torch.cat([deconv2_out, conv4_out], 1), output_size = conv3_out.size())
deconv3_out = self.deconv3_BAD(deconv3_out)
deconv4_out = self.deconv4(torch.cat([deconv3_out, conv3_out], 1), output_size = conv2_out.size())
deconv4_out = self.deconv4_BAD(deconv4_out)
deconv5_out = self.deconv5(torch.cat([deconv4_out, conv2_out], 1), output_size = conv1_out.size())
deconv5_out = self.deconv5_BAD(deconv5_out)
deconv6_out = self.deconv6(torch.cat([deconv5_out, conv1_out], 1), output_size = mix.size())
out = F.sigmoid(deconv6_out)
return out
|
GHData/Chaiyanchong_CutMix-PyTorch-master/resnet.py: 148-177
def forward(self, x):
if self.dataset == 'cifar10' or self.dataset == 'cifar100':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
elif self.dataset == 'imagenet':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/Grzetan_HandPoseEstimationPyTorch/model.py: 119-146
def forward(self, X):
X = self.conv_in(X)
X = self.bn1(X)
X = self.mish(X)
X = self.pad(X)
X = self.depthwise_conv(X)
X = self.bn1(X)
X = self.mish(X)
# Attention Augmentation
a = self.pad(X)
a = self.aug_conv_out(a)
attn_out = self.qkv_conv(X)
attn_out = self.AA(attn_out)
attn_out = self.attention_out(attn_out)
attn_out = torch.cat((a, attn_out), dim=1)
attn_out = self.bn1(attn_out)
attn_out = self.mish(attn_out)
X = X + attn_out # Add results of depthwise convolution and AA block
# Head
X = self.conv_out(X)
X = self.bn2(X)
X = self.mish(X)
return X
|
GHData/meder411_OmniDepth-PyTorch/network.py: 184-220
def forward(self, x):
# Encode down to 4x
x = self.input0(x)
x = self.input1(x)
x = self.encoder0(x)
x = self.encoder1(x)
x = self.encoder2(x)
x = self.encoder3(x)
x = self.decoder0_0(x)
x = self.decoder0_1(x)
x = self.decoder1_0(x)
x = self.decoder1_1(x)
# Predict at 4x downsampled
pred_4x = self.prediction0(x)
# Upsample through convolution to 2x
x = self.decoder2_0(x)
upsampled_pred_4x = F.interpolate(pred_4x.detach(), scale_factor=2)
# Predict at 2x downsampled
x = self.decoder2_1(torch.cat((x, upsampled_pred_4x), 1))
pred_2x = self.prediction1(x)
# Upsample through convolution to 1x
x = self.decoder3_0(x)
upsampled_pred_2x = F.interpolate(pred_2x.detach(), scale_factor=2)
# Predict at 1x
x = self.decoder3_1(torch.cat((x, upsampled_pred_2x), 1))
pred_1x = self.prediction2(x)
return [pred_1x, pred_2x, pred_4x]
# -----------------------------------------------------------------------------
|
GHData/dyhan0920_PyramidNet-PyTorch/resnet.py: 149-178
def forward(self, x):
if self.dataset == 'cifar10' or self.dataset == 'cifar100':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
elif self.dataset == 'imagenet':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/YimingZzz_PyTorch_PG2/skeleton_mask.py: 247-275
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1,out1_2,out1],1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1,out2_2,out1],1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1,out3_2,out1],1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1,out4_2,out1],1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1,out5_2,out1],1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1,out6_2
|
GHData/YimingZzz_PyTorch_PG2/picture_demo.py: 111-139
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1,out1_2,out1],1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1,out2_2,out1],1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1,out3_2,out1],1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1,out4_2,out1],1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1,out5_2,out1],1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1,out6_2
|
GHData/splionar_image-sim-torch/layers.py: 98-124
def forward(self, x):
net0 = self.actvn(self.convL0(x))
net0 = self.pool(net0)
net0 = self.DoubleConv2dL0(net0)
net1 = self.Down1(net0)
net2 = self.Down2(net1)
net3 = self.Down3(net2)
net = self.Down4(net3)
net = self.Up4(net)
net = torch.cat([net3, net], dim=1)
net = self.DoubleConv2dR3(net)
net = self.Up3(net)
net = torch.cat([net2, net], dim=1)
net = self.DoubleConv2dR2(net)
net = self.Up2(net)
net = torch.cat([net1, net], dim=1)
net = self.DoubleConv2dR1(net)
net = self.Up1(net)
net = torch.cat([net0, net], dim=1)
net = self.DoubleConv2dR0(net)
net = self.convR0(net)
return net
|
GHData/muratonuryildirim_PyTorch_Notes/14_InceptionNet_from_scratch.py: 38-65
def forward(self, x):
x = self.conv1(x)
x = self.maxpool1(x)
x = self.conv2(x)
x = self.maxpool2(x)
x = self.inception3a(x)
x = self.inception3b(x)
x = self.maxpool3(x)
x = self.inception4a(x)
x = self.inception4b(x)
x = self.inception4c(x)
x = self.inception4d(x)
x = self.inception4e(x)
x = self.maxpool4(x)
x = self.inception5a(x)
x = self.inception5b(x)
x = self.avgpool(x)
x = x.reshape(x.shape[0], -1)
x = self.dropout(x)
x = self.fc1(x)
return x
|
GHData/MNaplesDevelopment_PyTorch-ResNet/model.py: 63-90
def forward(self, input):
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.res1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.res2(x)
x = self.mp2(x)
x = self.res2(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.res3(x)
x = self.mp2(x)
x = self.res3(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = self.res4(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/JimpeiYamamoto_myTorch/MCDropout.py: 212-239
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
#x = self.fc1(x)
'''
custom↓
'''
x = self.relu2(x)
x = self.fc1(x)
x = self.relu3(x)
x = self.dropout1(x)
x = self.fc2(x)
x = self.dropout2(x)
x = self.bn2(x)
x = self.fc3(x)
return x
|
GHData/Eedvard_PyTorch-AlexNet/main.py: 36-67
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.max_pool(x)
x = self.conv2(x)
x = self.relu(x)
x = self.max_pool(x)
x = self.conv3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.relu(x)
x = self.conv5(x)
x = self.relu(x)
x = self.max_pool(x)
x = x.view(x.size(0), -1)
x = self.dropout(x)
x = self.fc1(x)
x = self.relu(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.relu(x)
x = self.fc3(x)
return x
|
GHData/clovaai_CutMix-PyTorch/resnet.py: 148-177
def forward(self, x):
if self.dataset == 'cifar10' or self.dataset == 'cifar100':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
elif self.dataset == 'imagenet':
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123/network_MobileNetv3.py: 217-241
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 16 * 112 * 112
x = self.conv2(x) # out: B * 16 * 112 * 112
x = self.conv3(x) # out: B * 24 * 56 * 56
x = self.conv4(x) # out: B * 24 * 56 * 56
x = self.conv5(x) # out: B * 40 * 28 * 28
x = self.conv6(x) # out: B * 40 * 28 * 28
x = self.conv7(x) # out: B * 40 * 28 * 28
x = self.conv8(x) # out: B * 80 * 14 * 14
x = self.conv9(x) # out: B * 80 * 14 * 14
x = self.conv10(x) # out: B * 80 * 14 * 14
x = self.conv11(x) # out: B * 80 * 14 * 14
x = self.conv12(x) # out: B * 112 * 14 * 14
x = self.conv13(x) # out: B * 112 * 14 * 14
x = self.conv14(x) # out: B * 160 * 7 * 7
x = self.conv15(x) # out: B * 160 * 7 * 7
x = self.conv16(x) # out: B * 160 * 7 * 7
x = self.conv17(x) # out: B * 960 * 7 * 7
x = self.conv18(x) # out: B * 1280 * 1 * 1
# classifier
x = x.mean(3).mean(2) # out: B * 1280 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/snowflakewang_Xception-on-ImageNet-Dogs-with-PyTorch/Xception.py: 147-185
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.relu(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
'''
x = self.block5(x)
x = self.block6(x)
x = self.block7(x)
x = self.block8(x)
x = self.block9(x)
x = self.block10(x)
x = self.block11(x)
'''
x = self.block12(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.relu(x)
x = self.conv4(x)
x = self.bn4(x)
x = self.relu(x)
x = F.adaptive_avg_pool2d(x, (1, 1))
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
|
GHData/YimingZzz_PyTorch_PG2/image_preprocess2.py: 185-213
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1,out1_2,out1],1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1,out2_2,out1],1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1,out3_2,out1],1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1,out4_2,out1],1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1,out5_2,out1],1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1,out6_2
|
GHData/Gerryflap_Torch_MNIST_GAN/mnist_gan.py: 98-126
def forward(self, inp):
x = self.conv_1(inp)
x = self.activ(x)
x = self.conv_2(x)
if self.use_bn:
x = self.bn_2(x)
x = self.activ(x)
x = self.conv_3(x)
if self.use_bn:
x = self.bn_3(x)
x = self.activ(x)
x = self.conv_4(x)
if self.use_bn:
x = self.bn_4(x)
x = self.activ(x)
# Flatten to vector
x = x.view(-1, self.h_size * 4)
x = self.lin_1(x)
x = self.activ(x)
x = self.lin_2(x)
x = torch.sigmoid(x)
return x
|
GHData/JimpeiYamamoto_myTorch/myResNet50.py: 173-200
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
#x = self.fc1(x)
'''
custom↓
'''
x = self.relu2(x)
x = self.fc1(x)
x = self.relu3(x)
x = self.dropout1(x)
x = self.fc2(x)
x = self.dropout2(x)
x = self.bn2(x)
x = self.fc3(x)
return x
|
GHData/zhaoyuzhi_PyTorch-MobileNet-v123-InstanceNorm/network_MobileNetv3.py: 217-241
def forward(self, x):
# feature extraction
x = self.conv1(x) # out: B * 16 * 112 * 112
x = self.conv2(x) # out: B * 16 * 112 * 112
x = self.conv3(x) # out: B * 24 * 56 * 56
x = self.conv4(x) # out: B * 24 * 56 * 56
x = self.conv5(x) # out: B * 40 * 28 * 28
x = self.conv6(x) # out: B * 40 * 28 * 28
x = self.conv7(x) # out: B * 40 * 28 * 28
x = self.conv8(x) # out: B * 80 * 14 * 14
x = self.conv9(x) # out: B * 80 * 14 * 14
x = self.conv10(x) # out: B * 80 * 14 * 14
x = self.conv11(x) # out: B * 80 * 14 * 14
x = self.conv12(x) # out: B * 112 * 14 * 14
x = self.conv13(x) # out: B * 112 * 14 * 14
x = self.conv14(x) # out: B * 160 * 7 * 7
x = self.conv15(x) # out: B * 160 * 7 * 7
x = self.conv16(x) # out: B * 160 * 7 * 7
x = self.conv17(x) # out: B * 960 * 7 * 7
x = self.conv18(x) # out: B * 1280 * 1 * 1
# classifier
x = x.mean(3).mean(2) # out: B * 1280 (global avg pooling)
x = self.classifier(x) # out: B * 1000
return x
|
GHData/anujdutt9_SqueezeNet-PyTorch/model.py: 138-188
def forward(self, x):
# Input Layer
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.max_pool(x)
# Fire Module 2
x = self.fire2(x)
# Fire Module 3
x = self.fire3(x)
# Fire Module 4
x = self.fire4(x)
# MaxPool
x = self.max_pool(x)
# Fire Module 5
x = self.fire5(x)
# Fire Module 6
x = self.fire6(x)
# Fire Module 7
x = self.fire7(x)
# Fire Module 8
x = self.fire8(x)
# MaxPool
x = self.max_pool(x)
# Fire Module 9
x = self.fire9(x)
x = self.dropout(x)
# Conv10
x = self.conv10(x)
x = self.relu(x)
# Global Avg Pool
x = self.avg_pool(x)
# Softmax Activation
x = self.softmax(x)
return torch.flatten(x, start_dim=1)
|
GHData/YimingZzz_PyTorch_PG2/image_preprocess.py: 185-213
def forward(self, x):
out1 = self.model0(x)
out1_1 = self.model1_1(out1)
out1_2 = self.model1_2(out1)
out2 = torch.cat([out1_1,out1_2,out1],1)
out2_1 = self.model2_1(out2)
out2_2 = self.model2_2(out2)
out3 = torch.cat([out2_1,out2_2,out1],1)
out3_1 = self.model3_1(out3)
out3_2 = self.model3_2(out3)
out4 = torch.cat([out3_1,out3_2,out1],1)
out4_1 = self.model4_1(out4)
out4_2 = self.model4_2(out4)
out5 = torch.cat([out4_1,out4_2,out1],1)
out5_1 = self.model5_1(out5)
out5_2 = self.model5_2(out5)
out6 = torch.cat([out5_1,out5_2,out1],1)
out6_1 = self.model6_1(out6)
out6_2 = self.model6_2(out6)
return out6_1,out6_2
|
GHData/DragonChen-TW_torch_DDP/models.py: 162-175
def forward(self, x):
x = self.pre(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avg_pool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/DragonChen-TW_torch_DDP/models.py: 111-124
def forward(self, x):
x = self.pre(x)
x = self.max_pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avg_pool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
|
GHData/wakandan_torch-codes/resnet.py: 90-106
def forward(self, x):
x = self.conv1(x)
x = self.max_pool1(x)
x = self.block1(x)
x = self.block2(x)
x = self.block3(x)
x = self.block4(x)
bs = x.shape[0]
x = x.reshape(bs, -1)
x = self.fc(x)
return x
# image = Image.open('./13213385.jpg')
# resnet = MyResNet((2, 2, 2, 2))
# img = tfms(image).unsqueeze(0)
# result = resnet(img)
# print(result.shape)
|
GHData/zonghaofan_DRML_torch/network_fer2013.py: 94-124
def forward(self, x):
"""
:param x: (b, c, h, w)
:return: (b, class_number)
"""
batch_size = x.size(0)
x = self.extractor1(x)
# print(x.shape)
x = self.extractor2(x)
# print(x.shape)
x = self.extractor3(x)
short_cut = x
x = self.bottleneck(x)
x = self.relu(x + short_cut)
# print(x.shape)
x = self.avgpool(x)
# print(x.shape)
x = x.view(batch_size, -1)
# print(x.shape)
output=self.classifier(x)
return output
|
GHData/acholston_PyTorch_Exercises/Ex11-1a.py: 50-68
def forward(self, x):
#Perform
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
#Concat branches
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
#Second Inception Module
|
GHData/cwangjiang_PyTorch_Basics/11_cnnInceptionMNIST.py: 35-50
def forward(self, x):
branch1 = self.branch1_1(x)
branch2_1 = self.branch2_1(x)
branch2_2 = self.branch2_2(branch2_1)
branch3_1 = self.branch3_1(x)
branch3_2 = self.branch3_2(branch3_1)
branch3_3 = self.branch3_3(branch3_2)
branch4_1 = F.avg_pool2d(x, kernel_size = 3, stride = 1, padding = 1)
branch4_2 = self.branch4_1(branch4_1)
outputs = [branch1, branch2_2, branch3_3, branch4_2]
return torch.cat(outputs, 1)
|
GHData/moqi112358_PyTorch-learning/10-Inception_CNN_MNIST.py: 42-54
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/zhuodannychen_PyTorch-Models/cnn_inception.py: 48-64
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/ORANKWON_PyTorchZeroToAll/11_1_toy_inception_mnist.py: 49-65
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/randalong_PyTorchZeroToAll/11_inception_model.py: 16-28
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size = 3, stride = 1, padding = 1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/sarvesh211999_PyTorch/11_1_toy_inception_mnist.py: 49-65
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/muhmaz3_pyTorchZeroToALL/11_1_toy_inception_mnist.py: 49-65
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/hunkim_PyTorchZeroToAll/11_1_toy_inception_mnist.py: 49-65
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/sharathmaidargi_finetune_torchvision/inception.py: 151-167
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/vohoaiviet_PyTorchZeroToAll/11_1_toy_inception_mnist.py: 49-65
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/pratikchhapolika_PyTorch_Beginners/11_1_toy_inception_mnist.py: 49-65
def forward(self, x):
branch1x1 = self.branch1x1(x)
branch5x5 = self.branch5x5_1(x)
branch5x5 = self.branch5x5_2(branch5x5)
branch3x3dbl = self.branch3x3dbl_1(x)
branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
branch_pool = self.branch_pool(branch_pool)
outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
return torch.cat(outputs, 1)
|
GHData/mnmjh1215_CSN-PyTorch/resnet3d.py: 28-43
def forward(self, x):
shortcut = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += shortcut
out = self.relu(out)
return out
|
GHData/MouxiaoHuang_myPRNet-PyTorch/ResNet10.py: 20-33
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
#if self.downsample:
# residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
# ResNet10
|
GHData/MNaplesDevelopment_PyTorch-ResNet/model.py: 32-43
def forward(self, input):
residual = input
x = self.conv1(input)
x = self.bn1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x += residual
x = self.relu(x)
return x
|
GHData/jcheunglin_Full-Resolution-Residual-Networks-with-PyTorch/FRRNet.py: 54-74
def forward(self, x):
if self.efficent:
resfunc = residual_func(self.relu,self.bn1,self.conv1,self.bn2,self.conv2)
ret = cp.checkpoint(resfunc,x)
return ret
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
|
GHData/rczhen_PyTorch-Deep-Learning-Library/resnet18.py: 54-75
def forward(self, x):
h = x
# print("Residual Input --", x.shape)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# print("Residual #1 --", x.shape)
x = self.conv2(x)
x = self.bn2(x)
# print("Residual #2 --", x.shape)
identity = self.down_sample(h)
# print("Residual identity --", identity.shape)
x = x + identity
x = self.relu(x)
# print("Residual Output --", x.shape)
return x
|
GHData/tuladhay_ATOC_COMA_PyTorch/algorithm.py: 93-106
def forward(self, inputs):
x = inputs
x = self.linear1(x)
x = self.ln1(x)
x = F.relu(x)
x = self.linear2(x)
x = self.ln2(x)
x = F.relu(x)
mu = F.tanh(self.mu(x))
output = self.softmax(mu)
return output
# This is the softmax probabilities for the actions of the agent
|
GHData/zinsmatt_PyTorch-Examples/resNet.py: 59-73
def forward(self, x):
x_skip = x
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = self.conv2(x)
x = self.bn2(x)
x_skip = self.conv_skip(x_skip)
x_skip = self.bn_skip(x_skip)
x = x + x_skip
return x
|
GHData/jameschengpeng_PyTorch-CNN-on-CIFAR10/ConvNetClassifier.py: 54-69
def forward(self, x):
x = F.relu(self.conv1(x)) #32*32*48
x = F.relu(self.conv2(x)) #32*32*96
x = self.pool(x) #16*16*96
x = self.Dropout(x)
x = F.relu(self.conv3(x)) #16*16*192
x = F.relu(self.conv4(x)) #16*16*256
x = self.pool(x) # 8*8*256
x = self.Dropout(x)
x = x.view(-1, 8*8*256) # reshape x
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.Dropout(x)
x = self.fc3(x)
return x
# Define loss function and optimizer. We employ cross-entropy and Adam
|
GHData/SatoKeiju_Triplet-Network-PyTorch/models.py: 19-33
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.maxpool(x)
x = F.relu(self.conv2(x))
x = self.maxpool(x)
x = F.relu(self.conv3(x))
x = self.maxpool(x)
x = self.conv4(x)
x = self.avgpool(x)
# x = self.dropout(x)
# x = x.view(-1, 128*5*5)
# x = F.relu(self.fc(x))
# x = self.classifier(x)
return x
|
GHData/Knight3-code_PyTorch-TVM-PTQ-Test/tvm_discuss_pytorch.py: 46-58
def forward(self, x):
x = self.quant(x)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = self.pool1(x)
x = x.reshape(-1,9216)
x = self.dr1(x)
x = F.relu(self.fc1(x))
x = self.dr2(x)
x = self.fc2(x)
x = self.dequant(x)
return x
|
GHData/Xiaoqiqiyaya_PyTorch-Networks/Lenet%20with%20pytorch.py: 20-35
def forward(self,x):
out = self.relu(self.conv1(x))
out = self.maxpool(out)
out = self.relu2(self.conv2(out))
out = self.maxpool2(out)
out = out.view(out.size(0),-1)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
out = self.relu(out)
out = self.fc3(out)
return out
|
GHData/aditya12agd5_pytorch_divcolor/mdn.py: 38-56
def forward(self, feats):
x = F.relu(self.mdn_conv1(feats))
x = self.mdn_bn1(x)
x = F.relu(self.mdn_conv2(x))
x = self.mdn_bn2(x)
x = F.relu(self.mdn_conv3(x))
x = self.mdn_bn3(x)
x = F.relu(self.mdn_conv4(x))
x = self.mdn_bn4(x)
x = F.relu(self.mdn_conv5(x))
x = self.mdn_bn5(x)
x = F.relu(self.mdn_conv6(x))
x = self.mdn_bn6(x)
x = F.relu(self.mdn_conv7(x))
x = self.mdn_bn7(x)
x = x.view(-1, 4*4*64)
x = self.mdn_dropout1(x)
x = self.mdn_fc1(x)
return x
|
GHData/chakam1307_Image-classification-using-PyTorch/new_model.py: 169-188
def forward(self, desc):
x = self.relu(self.conv5_1(desc))
x = self.relu(self.conv5_2(x))
x = self.bn1(x)
x = self.relu(self.conv6_1(x))
x = self.relu(self.conv6_2(x))
x = self.bn2(x)
x = torch.flatten(x,1)
# print("tensor : ", x.shape)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
|
GHData/mickymicmouse_v_OCGAN_torch/networks.py: 145-162
def forward(self, input):
output = self.conv1(input)
output = self.leaky_relu(output)
output = self.batch_norm_1(self.conv2(output))
output = self.leaky_relu(output)
output = self.batch_norm_2(self.conv3(output))
output = self.leaky_relu(output)
output = self.conv4(output)
output = torch.sigmoid(output)
output = output.view(output.size(0),-1)
return output
|
GHData/mickymicmouse_v_OCGAN_torch/networks.py: 182-198
def forward(self, input):
output = self.conv1(input)
output = self.leaky_relu(output)
output = self.batch_norm_1(self.conv2(output))
output = self.leaky_relu(output)
output = self.batch_norm_2(self.conv3(output))
output = self.leaky_relu(output)
output = self.conv4(output)
output = torch.sigmoid(output)
output = output.view(output.size(0), -1)
return output
|
GHData/zonghaofan_DRML_torch/network_fer2013_deep_short.py: 111-157
def forward(self, x):
"""
:param x: (b, c, h, w)
:return: (b, class_number)
"""
batch_size = x.size(0)
short_x=x
x1 = self.extractor1(x)
# print(x1.shape)
x2=self.extractor1_shortcut(short_x)
# print(x2.shape)
x=x1+x2
# print(x.shape)
short_x=x
x1 = self.extractor2(x)
# print(x1.shape)
x2 = self.extractor2_shortcut(short_x)
# print(x2.shape)
x = x1 + x2
# print(x.shape)
short_x = x
x1 = self.extractor3(x)
# print(x1.shape)
x2 = self.extractor3_shortcut(short_x)
# print(x2.shape)
x = x1 + x2
# print(x.shape)
# short_cut = x
# x = self.bottleneck(x)
# x = self.relu(x + short_cut)
x=self.conv(x)
# print(x.shape)
x = self.avgpool(x)
x = x.view(batch_size, -1)
output=self.classifier(x)
return output
|
GHData/pb2377_PyTorch-Disentangling-Content-and-Style-Unsupervised/hourglasses.py: 82-96
def forward(self, x):
out1 = x
out1 = self.skip(out1)
out2 = x
out2 = self.mp(out2)
out2 = self.afterpool(out2)
if self.numReductions > 1:
out2 = self.hg(out2)
else:
out2 = self.num1res(out2)
out2 = self.lowres(out2)
out2 = self.up(out2)
return out2 + out1
|
GHData/dbbbbm_f-AnoGAN-PyTorch/wgan64x64.py: 138-154
def forward(self, input):
if self.input_dim == self.output_dim and self.resample == None:
shortcut = input
else:
shortcut = self.conv_shortcut(input)
output = input
output = self.bn1(output)
output = self.relu1(output)
output = self.conv_1(output)
output = self.bn2(output)
output = self.relu2(output)
output = self.conv_2(output)
return shortcut + output
|
GHData/Nial4_PyNET-Bokeh-PyTorch/model_original.py: 121-135
def level_5(self, pool4):
conv_l5_d1 = self.conv_l5_d1(pool4)
conv_l5_d2 = self.conv_l5_d2(conv_l5_d1)
conv_l5_d3 = self.conv_l5_d3(conv_l5_d2)
conv_l5_d4 = self.conv_l5_d4(conv_l5_d3)
conv_t4a = self.conv_t4a(conv_l5_d4)
conv_t4b = self.conv_t4b(conv_l5_d4)
conv_l5_out = self.conv_l5_out(conv_l5_d4)
output_l5 = self.output_l5(conv_l5_out)
return output_l5, conv_t4a, conv_t4b
|
GHData/aiff22_PyNET-PyTorch/model.py: 107-121
def level_5(self, pool4):
conv_l5_d1 = self.conv_l5_d1(pool4)
conv_l5_d2 = self.conv_l5_d2(conv_l5_d1)
conv_l5_d3 = self.conv_l5_d3(conv_l5_d2)
conv_l5_d4 = self.conv_l5_d4(conv_l5_d3)
conv_t4a = self.conv_t4a(conv_l5_d4)
conv_t4b = self.conv_t4b(conv_l5_d4)
conv_l5_out = self.conv_l5_out(conv_l5_d4)
output_l5 = self.output_l5(conv_l5_out)
return output_l5, conv_t4a, conv_t4b
|
GHData/BenjaminAm_PyTorch-Tutorial/untitled0.py: 66-83
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x,2)
x = F.relu(x)
x = self.conv2(x)
x = F.max_pool2d(x,2)
x = F.relu(x)
x = self.conv3(x)
x = F.max_pool2d(x,2)
x = F.relu(x)
x = self.conv4(x)
x = F.max_pool2d(x,2)
x = F.relu(x)
x = x.view(-1,3456)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.sigmoid(x)
|
GHData/dbbbbm_f-AnoGAN-PyTorch/wgan64x64.py: 301-313
def forward(self, x):
x = self.dropout(x)
x = self.conv_in(x)
x = self.res1(x)
x = self.res2(x)
x = self.res3(x)
x = self.res4(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return torch.tanh(x)
|
GHData/ashuuuriii_PyTorch-MobileNetV3/model.py: 125-145
def forward(self, x):
residual = x
y = self.pw1(x)
y = self.act(y)
y = self.dw(y)
y = self.act(y)
if self.se:
y = self.se(y)
y = self.pw2(y)
if self.drop_out:
y = self.drop_out(y)
if self.residual_connection:
return y + residual
else:
return y
|
GHData/tfmoraes_deep_lungs_torch/model.py: 71-100
def forward(self, img):
enc1 = self.encoder1(img)
enc2 = self.encoder2(self.pool1(enc1))
enc3 = self.encoder3(self.pool2(enc2))
enc4 = self.encoder4(self.pool3(enc3))
bottleneck = self.bottleneck(self.pool4(enc4))
upconv4 = self.upconv4(bottleneck)
dec4 = torch.cat((upconv4, enc4), dim=1)
dec4 = self.decoder4(dec4)
upconv3 = self.upconv3(dec4)
dec3 = torch.cat((upconv3, enc3), dim=1)
dec3 = self.decoder3(dec3)
upconv2 = self.upconv2(dec3)
dec2 = torch.cat((upconv2, enc2), dim=1)
dec2 = self.decoder2(dec2)
upconv1 = self.upconv1(dec2)
dec1 = torch.cat((upconv1, enc1), dim=1)
dec1 = self.decoder1(dec1)
conv = self.conv(dec1)
sigmoid = torch.sigmoid(conv)
return sigmoid
|
GHData/tfmoraes_deep_heart_torch/model.py: 72-101
def forward(self, img):
enc1 = self.encoder1(img)
enc2 = self.encoder2(self.pool1(enc1))
enc3 = self.encoder3(self.pool2(enc2))
enc4 = self.encoder4(self.pool3(enc3))
bottleneck = self.bottleneck(self.pool4(enc4))
upconv4 = self.upconv4(bottleneck)
dec4 = torch.cat((upconv4, enc4), dim=1)
dec4 = self.decoder4(dec4)
upconv3 = self.upconv3(dec4)
dec3 = torch.cat((upconv3, enc3), dim=1)
dec3 = self.decoder3(dec3)
upconv2 = self.upconv2(dec3)
dec2 = torch.cat((upconv2, enc2), dim=1)
dec2 = self.decoder2(dec2)
upconv1 = self.upconv1(dec2)
dec1 = torch.cat((upconv1, enc1), dim=1)
dec1 = self.decoder1(dec1)
conv = self.conv(dec1)
sigmoid = torch.sigmoid(conv)
return sigmoid
|
GHData/chLFF_alphaGAN-GP-for-images-augmentation-in-PyTorch/model.py: 193-202
def forward(self, x):
x = x.view(-1, self.z_size, 1, 1)
x = self.upsample1(x)
x = self.upsample2(x)
x = self.upsample3(x)
x = self.upsample4(x)
x = self.upsample5(x)
x = self.upsample6(x)
x = self.upsample7(x)
return x
|
GHData/HyperGDX_BVP_torch/ae_model.py: 58-71
def forward(self, x): # 32,28,64
y = self.linear2(x) # 32,28,256
y = F.relu(y)
y = self.linear1(y) # 32,28,1600
y = y.contiguous().view(y.size(0), y.size(1), 16, 16, 16) # 32,28,16,10,10
# y = self.upsample1(y) # 32,28,16,20,20
y = F.relu(y)
y = self.conv2(y)
y = F.relu(y)
y = self.conv1(y)
return y
|
GHData/Jw-Jn_PyTorch-Datathon-Palm/model.py: 24-40
def forward(self, x):
# print(x.shape)
out = self.backbone(x)
# print('backbone', out.shape)
out = out.view(out.size(0), -1)
# print('view', out.shape)
out = self.fc1(out)
# print('fc1',out.shape)
out = self.dropout1(out)
out = self.fc2(out)
out = self.dropout2(out)
out = self.fc3(out)
# print('fc2',out.shape)
out = self.sigmoid(out)
return out
|
GHData/tuladhay_ATOC_COMA_PyTorch/algorithm.py: 126-139
def forward(self, inputs, actions):
x = inputs
x = self.linear1(x)
x = self.ln1(x)
x = F.relu(x)
x = torch.cat((x, actions), 1)
x = self.linear2(x)
x = self.ln2(x)
x = F.relu(x)
V = self.V(x)
return V
|
GHData/dbbbbm_f-AnoGAN-PyTorch/wgan64x64.py: 269-279
def extract_feature(self, input):
output = input.contiguous()
output = output.view(-1, 3, DIM, DIM)
output = self.conv1(output)
output = self.rb1(output)
output = self.rb2(output)
output = self.rb3(output)
output = self.rb4(output)
output = output.view(-1, 4*4*8*self.dim)
return output
|
GHData/DanielCoelho112_DDPG-PyTorch/critic_network.py: 52-63
def forward(self, state, action):
state_value = self.fc1(state)
state_value = self.bn1(state_value)
state_value = F.relu(state_value)
state_value = self.fc2(state_value)
state_value = self.bn2(state_value)
action_value = self.action_value(action)
state_action_value = F.relu(torch.add(state_value, action_value))
state_action_value = self.q(state_action_value)
return state_action_value
|
GHData/Naagar_pyTorch_Lightning_tutorial/pytorch_Lightning.py: 65-75
def forward(self, x):
batch_size, channels, width, height = x.size()
x = x.view(batch_size, -1)
x = self.layer_1(x)
x = F.relu(x)
x = self.layer_2(x)
x = F.relu(x)
x = self.layer_3(x)
x = F.log_softmax(x, dim=1)
return x
|
GHData/HyperGDX_BVP_torch/vae_model.py: 82-94
def forward(self, z):
out = self.fc1(z)
out = self.relu(out)
out = self.fc2(out)
out = out.view(-1, 80, 4, 4) # the shape from encoder before its first FC layers
out = self.convt3(out)
out = self.convt4(out)
out = self.convt5(out)
out = self.convt6(out)
return self.tanh(out)
|
GHData/cjearce_fashion_mnist_torch/FASHION_MNIST_FFNN.py: 126-144
def forward(self, xb):
# Flatten the image tensors
xb = xb.view(xb.size(0), -1)
# Get intermediate outputs using hidden layer
out = self.linear1(xb)
# Apply activation function
out = F.relu(out)
# Get intermediate outputs using hidden layer 2
out = self.linear2(out)
# Apply activation function
out = F.relu(out)
# Get intermediate outputs using hidden layer 3
out = self.linear3(out)
# Apply activation function
out = F.relu(out)
# Get predictions using output layer
out = self.linear4(out)
return out
|
GHData/DableUTeeF_HiResTorch/resnet20.py: 94-103
def forward(self, x):
out = self.bn1(self.conv1(x))
out = swish(out, self.activation)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/theidentity_PyTorch-One-Page-Trainer/resnet.py: 87-98
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/ZhiwenShao_PyTorch-JAANet/network.py: 72-85
def forward(self, x):
x = self.conv(x)
local_branch1 = self.local_conv_branch1(x)
local_branch2 = self.local_conv_branch2(local_branch1)
local_branch3 = self.local_conv_branch3(local_branch2)
local_out = torch.cat((local_branch1, local_branch2, local_branch3), 1)
out = x + local_out
out = self.bn(out)
out = self.relu(out)
return out
|
GHData/DeepLatte_DCTTS-torch/networks.py: 94-106
def forward(self, input):
txtEncOut = self.EmbLayer(input)
txtEncOut = txtEncOut.transpose(1,2) # (B, e, T)
txtEncOut = self.Conv1st(txtEncOut)
txtEncOut = self.Conv2nd(txtEncOut)
txtEncOut = self.HcTwice1(txtEncOut)
txtEncOut = self.HcTwice2(txtEncOut)
txtEncOut = self.Hc3(txtEncOut)
txtEncOut = self.Hc4(txtEncOut)
K, V = torch.chunk(txtEncOut, 2, 1) # Divide txtEncOut along axis 1 to get 2 matrices.
return K, V
|
GHData/daveboat_torch_gan_example/model.py: 72-87
def forward(self, x):
x = self.conv1(x)
x = self.leakyrelu1(x)
x = self.dropout1(x)
# [N, 64, 14, 14]
x = self.conv2(x)
x = self.leakyrelu2(x)
x = self.dropout2(x)
# [N, 64, 7, 7]
x = x.view((-1, 128*7*7))
# [N, 128*7*7]
x = self.fc(x)
# [N, 1]
return x
|
GHData/huangtao36_PyTorch-Fully-Convolutional-ResNet-50/network_ResNet.py: 103-113
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.stride != 1 or self.first == True:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
|
GHData/huangtao36_PyTorch-Fully-Convolutional-ResNet-50/network_ResNet.py: 128-138
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.stride != 1 or self.first == True:
identity = self.downsample(x)
out = out + identity
out = self.relu(out)
return out
|
GHData/Kongsea_finetune-in-PyTorch/pnasnet.py: 85-98
def forward(self, x):
x = self.relu_1(x)
if self.zero_pad:
x = self.zero_pad(x)
x = self.separable_1(x)
if self.zero_pad:
x = x[:, :, 1:, 1:].contiguous()
x = self.bn_sep_1(x)
x = self.relu_2(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
|
GHData/theidentity_PyTorch-One-Page-Trainer/shake_shake.py: 103-113
def forward(self, x):
h = self.c_in(x)
h = self.layer1(h)
h = self.layer2(h)
h = self.layer3(h)
h = F.relu(h)
h = F.avg_pool2d(h, 8)
h = h.view(-1, self.in_chs[3])
h = self.fc_out(h)
return h
|
GHData/Ninebell_torch_module/layers.py: 110-123
def forward(self, x):
init = x
x = self.batch1(x)
x = self.activation(x)
x = self.block1(x)
x = self.block2(x)
x = self.c3(x)
if self.attention:
x = self.attention(x)
return x + init
|
GHData/Arseni1919_PyTorch_Lightning_example/main.py: 34-55
def forward(self, x):
batch_size, channels, width, height = x.size()
# (b, 1, 28, 28) -> (b, 1*28*28)
x = x.view(batch_size, -1)
# layer 1 (b, 1*28*28) -> (b, 128)
x = self.layer_1(x)
x = torch.relu(x)
# layer 2 (b, 128) -> (b, 256)
x = self.layer_2(x)
x = torch.relu(x)
# layer 3 (b, 256) -> (b, 10)
x = self.layer_3(x)
# probability distribution over labels
x = torch.log_softmax(x, dim=1)
return x
|
GHData/qiaosiyi_learn_torch/mymodel.py: 104-116
def forward(self, x):
# 在这里,整个ResNet18的结构就很清晰了
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/Ovikx_PyTorch-GAN/dcgan.py: 170-182
def forward(self, x):
'''
Passes the input through the layer structure (aka forward propagation)
'''
x = self.dense_stack(x)
x = self.conv_stack1(x)
x = self.conv_stack2(x)
x = self.conv_stack3(x)
x = self.conv_stack4(x)
x = self.conv_stack5(x)
x = self.conv_stack6(x)
return x
|
GHData/hihiendru_pyTorch101Blog/part2Classifier.py: 75-87
def forward(self, x):
# Output of one layer becomes input to the next
out = nn.ReLU()(self.bn1(self.conv1(x)))
out = self.block1(out)
out = self.block2(out)
out = self.block3(out)
out = self.block4(out)
out = nn.AvgPool2d(4)(out)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/DableUTeeF_HiResTorch/models.py: 40-51
def forward(self, z):
x, w = z[0], z[1]
out = self.conv1(x)
out = self.bn1(out)
w = torch.cat((w, F.avg_pool2d(out, self.pool_size)), 1)
out = F.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = F.relu(out)
return out, w
|
GHData/erikqu_EnhanceNet-PyTorch/model.py: 56-66
def forward(self, x):
out = self.conv1(x)
out = self.residuals(out)
out = self.conv2(out)
out= self.upsample(out)
out = self.conv3(out)
i_res = self.conv4(out)
i_bicubic = self.resize(x)
out = torch.add(i_bicubic ,i_res)
return out
|
GHData/mindingyao_MobileNetV2/ResNet.py: 17-28
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
residual = x if self.shortcut == None else self.shortcut(x)
out += residual
return out
|
GHData/Maggiking_PGGAN-PyTorch/model.py: 47-60
def forward(self, x):
if self.upsample is not None:
x = self.upsample(x)
# x = self.conv1(x*scale1)
x = self.conv1(x)
x = self.relu(x)
x = self.pixelwisenorm(x)
# x = self.conv2(x*scale2)
x = self.conv2(x)
x = self.relu(x)
x = self.pixelwisenorm(x)
return x
|
GHData/pdefusco_Distributed_PyTorch_Horovod/models.py: 88-99
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/claydeman_torch-learning/simpleClassifier.py: 51-65
def forward(self,x):
x=self.conv1(x)
x=F.max_pool2d(x,2,2)
x=F.relu(x)
x=self.conv2(x)
x=F.max_pool2d(x,2,2)
x=F.relu(x)
x=x.view(-1,4*4*50)
x=self.fc1(x)
x=self.fc2(x)
return x
|
GHData/mruberry_pnas_torch/pnas.py: 61-74
def forward(self, x):
x = self.relu_1(x)
if self.is_zero_padded:
x = self.zero_pad(x)
x = self.separable_1(x)
if self.is_zero_padded:
x = x[:, :, 1:, 1:].contiguous()
x = self.bn_sep_1(x)
x = self.relu_2(x)
x = self.separable_2(x)
x = self.bn_sep_2(x)
return x
|
GHData/acholston_PyTorch_Exercises/Ex11-1b.py: 225-240
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7 = self.branch7x7_1(x)
branch7x7 = self.branch7x7_2(branch7x7)
branch7x7 = self.branch7x7_3(branch7x7)
branch7x7 = self.branch7x7_4(branch7x7)
mp = self.mp(x)
outputs = torch.cat([branch3x3, branch7x7, mp], 1)
return outputs
#Create overall network
|
GHData/lucabergamini_torch_snippets/gan.py: 87-98
def forward(self, i):
i = self.conv_0(i)
i = self.conv_1(i)
i = i.view(len(i), -1)
i = self.fc1(i)
i = self.dp1(i)
i = F.relu(i)
i = self.fc2(i)
i = F.log_softmax(i)
return i
|
GHData/acholston_PyTorch_Exercises/Ex11-1a.py: 160-176
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch3x3_7x7 = self.branch3x3_7x7_1(x)
branch3x3_7x7 = self.branch3x3_7x7_2(branch3x3_7x7)
branch3x3_7x7 = self.branch3x3_7x7_3(branch3x3_7x7)
branch3x3_7x7 = self.branch3x3_7x7_4(branch3x3_7x7)
branch_pool = self.mp(x)
#Concat
outputs = [branch3x3, branch3x3_7x7, branch_pool]
return torch.cat(outputs, 1)
#Fifth and last Inception Module
|
GHData/Maggiking_SRGAN-PyTorch/model.py: 50-65
def forward(self, x):
out = self.conv1(x)
identity = self.relu(out)
out = self.resblocks(identity)
out = self.conv2(out)
out = self.bn(out)
out += identity
out = self.upblocks(out)
out = self.conv3(out)
return torch.tanh(out)
|
GHData/js05212_PyTorch-for-NPN/main_mlp.py: 195-205
def forward(self, x):
x = x.view(-1, 784)
x = self.fc1(x)
x = self.sigmoid1(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.sigmoid2(x)
x = self.drop2(x)
x, s = self.sigmoid3(self.fc3(x))
return x, s
|
GHData/AIFanatic_PyTorch-Voxel-VAE/model.py: 128-142
def encode(self, x):
encoder = self.enc_conv1(x)
encoder = self.enc_conv2(encoder)
encoder = self.enc_conv3(encoder)
encoder = self.enc_conv4(encoder)
fc1 = self.enc_fc1(encoder)
mu = self.mu(fc1)
sigma = self.sigma(fc1)
z = self.reparameterize(mu, sigma)
return mu, sigma, z
|
GHData/RizhaoCai_PyTorch_ONNX_TensorRT/trt_int8_demo.py: 27-42
def forward_default(self, X_in):
print("Function forward_default called! \n")
x = self.layer1(X_in)
x = self.relu(x)
x = self.max_pool(x)
x = self.layer2(x)
x = self.relu(x)
x = self.avg_pool(x)
# Such an operationt is not deterministic since it would depend on the input and therefore would result in errors
length_of_fc_layer = x.size(1)
x = x.view(-1, length_of_fc_layer)
x = self.fc(x)
return x
|
GHData/sharathmaidargi_finetune_torchvision/inception.py: 242-255
def forward(self, x):
branch3x3 = self.branch3x3_1(x)
branch3x3 = self.branch3x3_2(branch3x3)
branch7x7x3 = self.branch7x7x3_1(x)
branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
branch7x7x3 = self.branch7x7x3_4(branch7x7x3)
branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)
outputs = [branch3x3, branch7x7x3, branch_pool]
return torch.cat(outputs, 1)
|
GHData/CeasonCui_CNN_PyTorch/_5_cnn_acc_check.py: 104-119
def forward(self, x):
x = x.float()
x = x.view(-1, 1, 64, 64)
#x = x.reshape(-1, 1, 64, 64)
x = self.conv1(x)
x2 = self.relu(x)
x1 = x2.reshape(-1, 1, 64, 64)
x = self.pool(x)
x = self.conv2(x)
x = self.conv3(x)
#x = self.conv4(x)
x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
output = self.fc1(x)
#output = self.softmax(x)
return output # return x for visualization
|
GHData/wangqs97_UDA-PyTorch/resnet.py: 58-69
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/skmhrk1209_ResNet-PyTorch/model.py: 156-169
def forward(self, inputs):
inputs = self.conv(inputs)
inputs = self.pool(inputs)
for residual_block in self.residual_blocks:
inputs = residual_block(inputs)
inputs = self.norm(inputs)
inputs = self.act(inputs)
inputs = inputs.mean((2, 3))
inputs = self.linear(inputs)
return inputs
|
GHData/aeadod_ocr-torch/Mytorch.py: 100-111
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/DableUTeeF_HiResTorch/models.py: 19-30
def forward(self, z):
x, w = z[0], z[1]
out = self.conv1(x)
out = self.bn1(out)
w += F.avg_pool2d(out, self.pool_size)
out = F.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = F.relu(out)
return out, w
|
GHData/js05212_PyTorch-for-NPN/main_mlp.py: 166-176
def forward(self, x):
x = x.view(-1, 784)
x = self.fc1(x)
x = self.sigmoid1(x)
x = self.drop1(x)
x = self.fc2(x)
x = self.sigmoid2(x)
x = self.drop2(x)
x, s = self.sigmoid3(self.fc3(x))
return x, s
|
GHData/zoe0919_PyTorch_CIFAR10-RESNET/resnet.py: 56-67
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
|
GHData/devonsuper_PyTorch_ONNX_TensorRT/monodepth2conversion.py: 28-43
def forward_default(self, X_in):
print("Function forward_default called! \n")
x = self.layer1(X_in)
x = self.relu(x)
x = self.max_pool(x)
x = self.layer2(x)
x = self.relu(x)
x = self.avg_pool(x)
# Such an operationt is not deterministic since it would depend on the input and therefore would result in errors
length_of_fc_layer = x.size(1)
x = x.view(-1, length_of_fc_layer)
x = self.fc(x)
return x
|
GHData/HyperGDX_BVP_torch/vae_model.py: 49-64
def forward(self, x):
"""forward pass for encoder.
x: img with (3, 64, 64)
"""
b = x.size(0)
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = self.conv4(out)
out = self.dropout(out)
out = out.view(b, -1)
out = self.fc5(out)
out = self.relu(out)
return self.fc6(out)
|
GHData/heming-zhang_Torch-DL/mnist_cnn.py: 54-64
def forward(self, x):
x = self.conv1(x)
self.after_conv1 = x
x = self.conv2(x)
self.after_conv2 = x
x = self.conv3(x)
self.after_conv3 = x
x = x.view(x.size(0), -1) # flat (batch_size, 40 * 7 * 7)
y_pred = self.out(x)
return y_pred
|
GHData/DableUTeeF_HiResTorch/models.py: 125-136
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
w = F.avg_pool2d(out, 32)
out = F.relu(out)
z = self.layer1((out, w))
z = self.layer2(z)
out, w = self.layer3(z)
w = F.relu(w)
out = w.view(w.size(0), -1)
out = self.linear(out)
return out
|
GHData/devonsuper_PyTorch_ONNX_TensorRT/trt_int8_demo.py: 27-42
def forward_default(self, X_in):
print("Function forward_default called! \n")
x = self.layer1(X_in)
x = self.relu(x)
x = self.max_pool(x)
x = self.layer2(x)
x = self.relu(x)
x = self.avg_pool(x)
# Such an operationt is not deterministic since it would depend on the input and therefore would result in errors
length_of_fc_layer = x.size(1)
x = x.view(-1, length_of_fc_layer)
x = self.fc(x)
return x
|
GHData/jerryyyyy708_Double-UNet_PyTorch/modules.py: 77-88
def forward(self, x):
image_features = self.mean(x)
image_features = self.conv(image_features)
image_features = self.upsample(image_features)
atrous_block1 = self.atrous_block1(x)
atrous_block6 = self.atrous_block6(x)
atrous_block12 = self.atrous_block12(x)
atrous_block18 = self.atrous_block18(x)
net = self.conv_1x1_output(torch.cat([image_features, atrous_block1, atrous_block6,
atrous_block12, atrous_block18], dim=1))
return net
|
GHData/YZJ6GitHub_PyTorch_Learing/Resnet.py: 92-103
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/ahmedfadhil_DL-PyTorch/cnn_ffnn_validpool.py: 43-62
def forward(self, *input):
# con1
output = self.cnn1(input)
output = self.relu1(output)
# avg pool
output = self.avgpool1(output)
# con1
output = self.cnn2(output)
output = self.relu2(output)
# avg pool
output = self.avgpool2(output)
output = output.view(output.size(0), -1)
output = self.fc1(output)
return output
# Step4: instantiate model class
|
GHData/anushmite_PyTorch-EfficientNet/layers.py: 80-94
def forward(self, x):
residual = x
x = self.expand_pw(x)
x = self.depthwise(x)
x = self.se(x)
x = self.reduce_pw(x)
if self.skip_connection:
x = self.dropsample(x)
x = x + residual
return x
|
GHData/dingjun6953_PyTorch_docs/1.py: 139-165
def forward(self, x):
"""
:param x: Input data
:return: output - mnist digit label for the input image
"""
batch_size = x.size()[0]
# (b, 1, 28, 28) -> (b, 1*28*28)
x = x.view(batch_size, -1)
# layer 1 (b, 1*28*28) -> (b, 128)
x = self.layer_1(x)
x = torch.relu(x)
# layer 2 (b, 128) -> (b, 256)
x = self.layer_2(x)
x = torch.relu(x)
# layer 3 (b, 256) -> (b, 10)
x = self.layer_3(x)
# probability distribution over labels
x = torch.log_softmax(x, dim=1)
return x
|
GHData/junzhouye_NeuronCoverageTorch/cifar10_resnet.py: 80-92
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
|
GHData/bigmb_Unet-Segmentation-Pytorch-Nest-of-Unets/Models.py: 562-573
def forward(self, x):
    """Two convs with optional batch norm; ReLU between, configurable final activation."""
    h = self.c1(x)
    if self.bn:
        h = self.b1(h)
    h = self.c2(F.relu(h))
    if self.bn:
        h = self.b2(h)
    return self.last_active(h)
|
GHData/Thepnathi_Image-classification-using-PyTorch-and-CNNs/cnn.py: 27-65
def forward(self, x):
    """Forward pass for a (batch_size, 3, 250, 250) batch.

    Shape notes below omit the batch dimension; each kernel changes the
    spatial size as (N - F + 2P)/S + 1. Returns raw, unnormalised class
    scores — nn.CrossEntropyLoss applies the softmax.
    """
    # conv 7x7, stride 2, pad 3, 64 channels, then BN+ReLU: 3x250x250 -> 64x125x125
    h = self.batch_norm_and_relu(self.conv1(x))
    # max pool 3x3, stride 2: 64x125x125 -> 64x62x62
    h = self.pooling_layer(h)
    # conv 3x3, stride 1, pad 1, 64 channels, then BN+ReLU: 64x62x62 -> 64x62x62
    h = self.batch_norm_and_relu(self.conv2(h))
    # max pool 3x3, stride 2: 64x62x62 -> 64x30x30
    h = self.pooling_layer(h)
    # reshape for the fully connected layer (64x30x30 -> 57600), then 57600 -> 5
    return self.fc(h.view(-1, 64 * 30 * 30))
|
GHData/ruihangdu_PyTorch-Deep-Compression/lenet_5.py: 39-57
def forward(self, x):
    """LeNet-5 style pipeline: two conv/ReLU/pool stages, then three FC layers.

    The dropout layers present in the module are deliberately left disabled
    here, matching the original design.
    """
    h = self.pool(F.relu(self.conv1(x)))
    h = self.pool(F.relu(self.conv2(h)))
    h = h.view(-1, self.num_flat_features(h))
    h = F.relu(self.fc1(h))
    h = F.relu(self.fc2(h))
    return self.fc3(h)
|
GHData/loui0620_pyTorch_bean_classifier/ResNet18.py: 56-67
def forward(self, x):
    """Stem conv, four residual stages, 4x4 average pool, flatten, classify."""
    h = self.conv1(x)
    for blk in (self.layer1, self.layer2, self.layer3, self.layer4):
        h = blk(h)
    pooled = F.avg_pool2d(h, 4)
    return self.fc(pooled.view(pooled.size(0), -1))
|
GHData/furkancoskun_ResnetCrowd_PyTorch/resnetCrowd.py: 86-96
def forward(self, x):
    """Multi-task head: returns (behaviour logits, density-level logits, count, heatmap).

    NOTE(review): ``self.countingHead`` is applied twice — once to the raw
    backbone features (producing ``heatmap``) and once to the FC features
    (producing ``count``); confirm the same module is meant to serve both roles.
    """
    x = self.backbone(x)
    heatmap = self.countingHead(x)
    x = self.averagePool(x)
    # NOTE(review): this flattens the WHOLE batch into one 1-D vector of
    # fixed length 64*154*84 (no batch dimension kept) — only valid for one
    # specific batch/feature size; verify against the training setup.
    x = x.view(64*154*84)
    x = self.fc32(x)
    behavCls = self.behavClsHead(x)
    densLevelCls = self.densLevClsHead(x)
    count = self.countingHead(x)
    return behavCls, densLevelCls, count, heatmap
|
GHData/rushiv0609_SKNET-PyTorch/SKNET.py: 371-383
def forward(self, x):
    """SKNet: stem conv, four selective-kernel stages, global pool, classifier."""
    feat = self.basic_conv(x)
    for stage in (self.stage_1, self.stage_2, self.stage_3, self.stage_4):
        feat = stage(feat)
    feat = self.gap(feat)
    return self.classifier(feat.view(feat.shape[0], -1))
|
GHData/humblemat810_vision_torch_showcase/kaggle_bee.py: 110-130
def forward(self, x):
    """Size-dependent preprocessing to a 32x32 map, then conv/conv/avg-pool/flatten.

    Fix: removed the unreachable statements that followed the ``return``
    (a dangling comment and a ``pass``).
    TODO: common upper layers after the preprocessing stages are still
    unimplemented (noted in the original).
    """
    if not is_large(x):
        # small images: max_pool stride 2 twice -> 32x32 after a 128x128 crop
        h = self.small_image_pre_process(x)
    else:
        # large images: max_pool stride 2 once -> 32x32 after a 64x64 crop
        h = self.large_image_pre_process(x)
    h = self.conv1(h)
    h = self.conv2(h)
    h = self.avg_pool2d(h)
    return h.view(h.size(0), -1)
|
GHData/TTdeveloping_PyTorch_Medical_BiLSTM_CRF_NER/model.py: 71-84
def forward(self, word, bound, flag, radical, pinyin):
    """Embed five token-level feature streams, run the BiLSTM, project to tag logits."""
    embeds = torch.cat(
        (
            self.word_embed(word),
            self.bound_embed(bound),
            self.flag_embed(flag),
            self.radical_embed(radical),
            self.pinyin_embed(pinyin),
        ),
        2,
    )
    h = self.dropout(embeds)
    h, _ = self.bilstm(h)
    return self.linear(torch.tanh(h))
|
GHData/yearing1017_Deeplabv3_plus_PyTorch/aspp.py: 65-79
def forward(self, x):
    """ASPP head: four atrous branches plus a pooled branch, fused by a 1x1 conv."""
    branches = [self.aspp1(x), self.aspp2(x), self.aspp3(x), self.aspp4(x)]
    pooled = self.global_avg_pool(x)
    # upsample the pooled branch back to the spatial size of the last branch
    pooled = F.interpolate(pooled, size=branches[3].size()[2:], mode='bilinear',
                           align_corners=True)
    fused = self.conv1(torch.cat((*branches, pooled), dim=1))
    fused = self.relu(self.bn1(fused))
    return self.dropout(fused)
|
GHData/Lornatang_RFB_ESRGAN-PyTorch/model.py: 335-349
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
    """Super-resolution trunk: shallow conv, trunk-a + RFB trunk, long residual
    add, upsampling, two refinement convs; output clamped to [0, 1].
    """
    out1 = self.conv1(x)
    out_a = self.trunk_a(out1)
    out_rfb = self.trunk_rfb(out_a)
    out2 = self.conv2(out_rfb)
    # long residual connection around the trunk
    out = torch.add(out1, out2)
    # NOTE(review): self.conv2 is applied a SECOND time here, sharing weights
    # with the call above — confirm this reuse is intentional.
    out = self.conv2(out)
    out = self.upsampling(out)
    out = self.conv3(out)
    out = self.conv4(out)
    # in-place clamp keeps pixel values in the valid [0, 1] range
    out = torch.clamp_(out, 0.0, 1.0)
    return out
|
GHData/LiaoWC_VAE-MNIST-PyTorch/vae.py: 59-75
def forward(self, x):
    """VAE encoder: four conv+ReLU stages, flatten, FC, then (mu, logvar) heads."""
    batch = x.shape[0]
    h = self.relu0(self.conv0(x))
    h = self.relu1(self.conv1(h))
    h = self.relu2(self.conv2(h))
    h = self.relu3(self.conv3(h))
    h = self.fc0(h.view(batch, -1))
    return self.fc_mu(h), self.fc_logvar(h)
|
GHData/LiaoWC_VAE-MNIST-PyTorch/show.py: 42-58
def forward(self, x):
    """VAE encoder: four conv+ReLU stages, flatten, FC, then (mu, logvar) heads."""
    batch = x.shape[0]
    h = self.relu0(self.conv0(x))
    h = self.relu1(self.conv1(h))
    h = self.relu2(self.conv2(h))
    h = self.relu3(self.conv3(h))
    h = self.fc0(h.view(batch, -1))
    return self.fc_mu(h), self.fc_logvar(h)
|
GHData/vdivakar_PyTorch_learnings/impl_forward_method.py: 32-55
def forward(self, t):
    """Two conv/ReLU/max-pool stages, flatten, two hidden FC layers, output layer.

    Raw scores are returned: training uses cross_entropy, which applies the
    softmax internally (vs. cross_entropy_with_logits elsewhere).
    """
    h = F.max_pool2d(F.relu(self.conv1(t)), kernel_size=2, stride=2)
    h = F.max_pool2d(F.relu(self.conv2(h)), kernel_size=2, stride=2)
    # flatten everything except the batch dimension
    h = h.flatten(1, -1)
    h = F.relu(self.fc1(h))
    h = F.relu(self.fc2(h))
    return self.out(h)
|
GHData/pbehjatii_OverNet-PyTorch/OverNet.py: 24-41
def forward(self, x):
    """Three residual blocks with dense concatenation and per-step channel reduction."""
    cat = x
    out = x
    for rb, reduce in ((self.RB1, self.reduction1),
                       (self.RB2, self.reduction2),
                       (self.RB3, self.reduction3)):
        cat = torch.cat([cat, rb(out)], dim=1)
        out = reduce(cat)
    return out
|
GHData/Windxy_Classic_Network_PyTorch/InceptionV4.py: 194-225
def forward(self, x):
    """InceptionV4 pipeline from a 299 x 299 x 3 input to class scores.

    NOTE(review): each Inception stage reuses ONE module instance applied
    repeatedly (e.g. ``self.icpA`` four times), so the repeats share
    weights — confirm this is intended rather than distinct blocks.
    """
    # input: 299 x 299 x 3
    # Stem module
    out = self.stem(x)
    # output: 35 x 35 x 384
    # InceptionA module * 4 (same instance, shared weights)
    out = self.icpA(self.icpA(self.icpA(self.icpA(out))))
    # output: 35 x 35 x 384
    # ReductionA module
    out = self.redA(out)
    # output: 17 x 17 x 1024
    # InceptionB module * 7 (same instance, shared weights)
    out = self.icpB(self.icpB(self.icpB(self.icpB(self.icpB(self.icpB(self.icpB(out)))))))
    # output: 17 x 17 x 1024
    # ReductionB module
    out = self.redB(out)
    # output: 8 x 8 x 1536
    # InceptionC module * 3 (same instance, shared weights)
    out = self.icpC(self.icpC(self.icpC(out)))
    # output: 8 x 8 x 1536
    # Average pooling
    out = self.avgpool(out)
    # 1 x 1 x 1536
    out = out.view(out.size(0), -1)
    # 1536
    # Dropout
    out = self.dropout(out)
    # Linear (logits; softmax applied by the loss)
    out = self.linear(out)
    return out
|
GHData/naveen-hyperworks_Convolutional-Neural-Fabrics-PyTorch-Wrapper/neural_fabrics.py: 115-149
def forward(self, x):
    """Pre-activation bottleneck: (BN-ReLU-conv) x3 with a gated residual branch.

    Fix: the original fed ``x`` (not the BN+ReLU output) into ``self.conv1``,
    silently discarding the first pre-activation's result.
    """
    out = self.bn1(x)
    out = self.relu(out)
    out = self.conv1(out)  # was self.conv1(x): dropped the bn1/relu result
    out = self.bn2(out)
    out = self.relu(out)
    out = self.conv2(out)
    out = self.bn3(out)
    out = self.relu(out)
    out = self.conv3(out)
    residual = x
    if self.downsample is not None:
        residual = self.downsample(residual)
    if self.gating == 'total_dropout':
        # probability of remaining active is 1/op_count
        if np.random.randint(self.op_count) > 0:
            self.val = 0
    if self.gating is not None:
        residual = torch.mul(residual, self.val)
    out += residual
    return out
|
GHData/chaozhong2010_SENet-PyTorch/se_resnet.py: 39-67
def forward(self, x):
    """SE basic block: two convs, squeeze-and-excitation rescale, residual add."""
    skip = x
    y = self.relu(self.bn1(self.conv1(x)))
    y = self.bn2(self.conv2(y))
    if self.downsample is not None:
        skip = self.downsample(x)
    pre_se = y
    # squeeze-and-excitation: global pool -> FC -> ReLU -> FC -> sigmoid scale
    s = self.globalAvgPool(y)
    s = s.view(s.size(0), -1)
    s = self.relu(self.fc1(s))
    s = self.sigmoid(self.fc2(s))
    s = s.view(s.size(0), s.size(1), 1, 1)
    y = s * pre_se
    y += skip
    return self.relu(y)
|
GHData/miraclewkf_SENet-PyTorch/se_resnet.py: 39-67
def forward(self, x):
    """SE basic block: two convs, squeeze-and-excitation rescale, residual add."""
    skip = x
    y = self.relu(self.bn1(self.conv1(x)))
    y = self.bn2(self.conv2(y))
    if self.downsample is not None:
        skip = self.downsample(x)
    pre_se = y
    # squeeze-and-excitation: global pool -> FC -> ReLU -> FC -> sigmoid scale
    s = self.globalAvgPool(y)
    s = s.view(s.size(0), -1)
    s = self.relu(self.fc1(s))
    s = self.sigmoid(self.fc2(s))
    s = s.view(s.size(0), s.size(1), 1, 1)
    y = s * pre_se
    y += skip
    return self.relu(y)
|
GHData/hiroyasuakada_CycleGAN-PyTorch/model_base.py: 417-430
def forward(self, input_img):
    """Sequential encoder/recurrent/decoder stack; models 4 and 8 return
    (layer_output_list, state) pairs and only the first layer output is kept."""
    h = self.model_3(self.model_2(self.model_1(input_img)))
    seq_outs, _ = self.model_4(h)
    h = self.model_7(self.model_6(self.model_5(seq_outs[0])))
    seq_outs, _ = self.model_8(h)
    return self.model_11(self.model_10(self.model_9(seq_outs[0])))
|
GHData/mf1024_Contrastive-Predictive-Coding-for-Image-Recognition-in-PyTorch/resnet_blocks.py: 118-139
def forward(self, x):
    """Bottleneck with a projection shortcut on downsampling blocks."""
    identity = x
    if self.is_downsampling_block:
        identity = self.projection_batch_norm(self.projection_shortcut(identity))
    h = nn.functional.relu(self.batch_norm_1(self.conv_layer_1(x)))
    h = nn.functional.relu(self.batch_norm_2(self.conv_layer_2(h)))
    h = self.conv_layer_3(h)
    return nn.functional.relu(h + identity)
|
GHData/diaomin_PyTorch-implementation-of-GhostNet/ghostnet.py: 143-163
def forward(self, x):
    """Ghost bottleneck: ghost1 -> optional stride-2 depthwise -> optional SE
    -> ghost2, summed with the shortcut branch."""
    inp = x
    # 1st ghost bottleneck
    h = self.ghost1(x)
    # depth-wise convolution only on strided blocks
    if self.stride > 1:
        h = self.bn_dw(self.conv_dw(h))
    # squeeze-and-excitation, if configured
    if self.se is not None:
        h = self.se(h)
    # 2nd ghost bottleneck
    h = self.ghost2(h)
    return h + self.shortcut(inp)
|
GHData/MouxiaoHuang_myPRNet-PyTorch/ResFCN256.py: 44-61
def forward(self, x):
    """Residual block with three convs; projects the shortcut when the stride
    or channel count changes, then normalizes and activates the sum.
    """
    shortcut = x
    # NOTE(review): this reads the LAST dimension of x.size() as the channel
    # count, which implies an NHWC layout (PyTorch convs default to NCHW) —
    # confirm the expected tensor layout.
    (_, _, _, x_planes) = x.size()
    if self.stride != 1 or x_planes != self.out_planes:
        shortcut = self.shortcut_conv(x)
    x = self.conv1(x)
    x = self.conv2(x)
    x = self.conv3(x)
    x += shortcut
    x = self.normalizer_fn(x)
    x = self.activation_fn(x)
    return x
|
GHData/miraclewkf_SENet-PyTorch/se_resnet.py: 95-127
def forward(self, x):
    """SE bottleneck: three convs, squeeze-and-excitation rescale, residual add."""
    skip = x
    y = self.relu(self.bn1(self.conv1(x)))
    y = self.relu(self.bn2(self.conv2(y)))
    y = self.bn3(self.conv3(y))
    if self.downsample is not None:
        skip = self.downsample(x)
    pre_se = y
    # squeeze-and-excitation: global pool -> FC -> ReLU -> FC -> sigmoid scale
    s = self.globalAvgPool(y)
    s = s.view(s.size(0), -1)
    s = self.relu(self.fc1(s))
    s = self.sigmoid(self.fc2(s))
    s = s.view(s.size(0), s.size(1), 1, 1)
    y = s * pre_se
    y += skip
    return self.relu(y)
|
GHData/miraclewkf_SENet-PyTorch/se_resnext.py: 41-73
def forward(self, x):
    """SE-ResNeXt bottleneck: three convs, SE rescale, residual add."""
    skip = x
    y = self.relu(self.bn1(self.conv1(x)))
    y = self.relu(self.bn2(self.conv2(y)))
    y = self.bn3(self.conv3(y))
    if self.downsample is not None:
        skip = self.downsample(x)
    pre_se = y
    # squeeze-and-excitation: global pool -> FC -> ReLU -> FC -> sigmoid scale
    s = self.globalAvgPool(y)
    s = s.view(s.size(0), -1)
    s = self.relu(self.fc1(s))
    s = self.sigmoid(self.fc2(s))
    s = s.view(s.size(0), s.size(1), 1, 1)
    y = s * pre_se
    y += skip
    return self.relu(y)
|
GHData/chaozhong2010_SENet-PyTorch/se_resnext.py: 41-73
def forward(self, x):
    """SE-ResNeXt bottleneck: three convs, SE rescale, residual add."""
    skip = x
    y = self.relu(self.bn1(self.conv1(x)))
    y = self.relu(self.bn2(self.conv2(y)))
    y = self.bn3(self.conv3(y))
    if self.downsample is not None:
        skip = self.downsample(x)
    pre_se = y
    # squeeze-and-excitation: global pool -> FC -> ReLU -> FC -> sigmoid scale
    s = self.globalAvgPool(y)
    s = s.view(s.size(0), -1)
    s = self.relu(self.fc1(s))
    s = self.sigmoid(self.fc2(s))
    s = s.view(s.size(0), s.size(1), 1, 1)
    y = s * pre_se
    y += skip
    return self.relu(y)
|
GHData/chaozhong2010_SENet-PyTorch/se_resnet.py: 95-127
def forward(self, x):
    """SE bottleneck: three convs, squeeze-and-excitation rescale, residual add."""
    skip = x
    y = self.relu(self.bn1(self.conv1(x)))
    y = self.relu(self.bn2(self.conv2(y)))
    y = self.bn3(self.conv3(y))
    if self.downsample is not None:
        skip = self.downsample(x)
    pre_se = y
    # squeeze-and-excitation: global pool -> FC -> ReLU -> FC -> sigmoid scale
    s = self.globalAvgPool(y)
    s = s.view(s.size(0), -1)
    s = self.relu(self.fc1(s))
    s = self.sigmoid(self.fc2(s))
    s = s.view(s.size(0), s.size(1), 1, 1)
    y = s * pre_se
    y += skip
    return self.relu(y)
|
GHData/VisionSystemsInc_face-parsing.PyTorch/resnet.py: 36-50
def forward(self, x):
    """BasicBlock variant: conv-bn-relu-conv-bn residual branch, added to the
    identity/downsample shortcut before the final ReLU."""
    branch = F.relu(self.bn1(self.conv1(x)))
    branch = self.bn2(self.conv2(branch))
    skip = self.downsample(x) if self.downsample is not None else x
    return self.relu(skip + branch)
|
GHData/jrobertojunior_face-parsing.PyTorch/resnet.py: 36-50
def forward(self, x):
    """BasicBlock variant: conv-bn-relu-conv-bn residual branch, added to the
    identity/downsample shortcut before the final ReLU."""
    branch = F.relu(self.bn1(self.conv1(x)))
    branch = self.bn2(self.conv2(branch))
    skip = self.downsample(x) if self.downsample is not None else x
    return self.relu(skip + branch)
|
GHData/zllrunning_face-parsing.PyTorch/resnet.py: 36-50
def forward(self, x):
    """BasicBlock variant: conv-bn-relu-conv-bn residual branch, added to the
    identity/downsample shortcut before the final ReLU."""
    branch = F.relu(self.bn1(self.conv1(x)))
    branch = self.bn2(self.conv2(branch))
    skip = self.downsample(x) if self.downsample is not None else x
    return self.relu(skip + branch)
|
GHData/zllrunning_face-makeup.PyTorch/resnet.py: 36-50
def forward(self, x):
    """BasicBlock variant: conv-bn-relu-conv-bn residual branch, added to the
    identity/downsample shortcut before the final ReLU."""
    branch = F.relu(self.bn1(self.conv1(x)))
    branch = self.bn2(self.conv2(branch))
    skip = self.downsample(x) if self.downsample is not None else x
    return self.relu(skip + branch)
|
GHData/makifozkanoglu_MultiResUNet-PyTorch/multiresunet.py: 129-163
def forward(self, inp):
    """MultiResUNet: four encoder stages (each block output goes through its
    res-path for the skip), a bottleneck, and four decoder stages that
    concatenate a deconv output with the matching skip."""
    skips = []
    h = inp
    for block, res_path in ((self.mres_block1, self.res_path1),
                            (self.mres_block2, self.res_path2),
                            (self.mres_block3, self.res_path3),
                            (self.mres_block4, self.res_path4)):
        feat = block(h)
        h = self.pool(feat)
        skips.append(res_path(feat))
    h = self.mres_block5(h)
    for deconv, block, skip in ((self.deconv1, self.mres_block6, skips[3]),
                                (self.deconv2, self.mres_block7, skips[2]),
                                (self.deconv3, self.mres_block8, skips[1]),
                                (self.deconv4, self.mres_block9, skips[0])):
        h = block(torch.cat([deconv(h), skip], dim=1))
    return self.conv10(h)
|
GHData/lucabergamini_torch_snippets/siamese.py: 76-108
def forward(self, i):
    """Siamese branch: three BN-conv-ReLU-maxpool stages, then three BN-FC
    stages; the embedding is squashed to [-1, 1] with tanh.

    Fix: ``F.tanh`` is deprecated in modern PyTorch — replaced with the
    equivalent ``torch.tanh``. The dropout after fc1 stays disabled, as in
    the original.
    """
    i = F.relu(self.conv1(self.bn1(i)))
    i = F.max_pool2d(i, kernel_size=2)
    i = F.relu(self.conv2(self.bn2(i)))
    i = F.max_pool2d(i, kernel_size=2)
    i = F.relu(self.conv3(self.bn3(i)))
    i = F.max_pool2d(i, kernel_size=2)
    i = i.view(len(i), -1)
    i = F.relu(self.fc1(self.bn4(i)))  # dropout (do1) intentionally disabled
    i = F.relu(self.fc2(self.bn5(i)))
    i = self.fc3(self.bn6(i))
    return torch.tanh(i)
|
GHData/mzhang367_DCDH-PyTorch/model.py: 128-168
def forward(self, x):
    """Attention-guided conv net producing real-valued hash codes.

    args:
        x: input, size of 32 * 32
    returns:
        hash_a: one num_bits-dimensional code per input image
    """
    # residual (attention) branch, encoder: 32x32 -> 16 -> 8 -> 4 spatial maps
    attention_mask_16 = self.spatial_features_1(x)
    attention_mask_8 = self.spatial_features_2(attention_mask_16)
    attention_mask_4 = self.spatial_features_3(attention_mask_8)
    attention_mask = self.fc(attention_mask_4)  # 4 * 4
    # residual branch, decoder: upscale with skip-additions from the encoder
    attention_mask = self.upscales_1(attention_mask)
    attention_mask = self.bn1(attention_mask + attention_mask_8)
    attention_mask = self.upscales_2(attention_mask)
    attention_mask = self.bn2(attention_mask + attention_mask_16)
    attention_mask = self.upscales_3(attention_mask)
    attention_mask = self.upscales_4(attention_mask)
    attention_mask = torch.sigmoid(attention_mask)  # 32 * 32, values in (0, 1)
    # trunk branch: four plain conv stages at full resolution
    feature_trunk = self.attention_conv1(x)
    feature_trunk = self.attention_conv2(feature_trunk)
    feature_trunk = self.attention_conv3(feature_trunk)
    feature_trunk = self.attention_conv4(feature_trunk)  # 32 * 32
    # element-wise product and sum: trunk * (1 + mask), residual-attention style
    x_with_mix_attention = attention_mask * feature_trunk
    feature_catenate = feature_trunk + x_with_mix_attention  # 32 * 32
    # conv. block
    features_3 = self.features(feature_catenate)  # size of output: 3 * 3 * 128
    features_4 = self.conv4(features_3)  # size of output: 2 * 2 * 256
    features_a = torch.cat([features_3.view(features_3.size(0), -1), features_4.view(features_4.size(0), -1)], -1)  # fusion layer
    features_a = self.face_features_layer(features_a)  # fc layer, 2176--> 1024
    hash_a = self.hash_layer(features_a)  # hashing layer, 1024 --> num_bits
    return hash_a
|
GHData/gajdikuka_PyTorch-Binary-Neural-Network-with-Data-Export/bnn_mnist.py: 83-110
def forward(self, x):
    """Binary MLP: flatten to 784, then five FC stages interleaved with
    BN / hard-tanh / binarization, ending with log-softmax."""
    h = self.bin0(self.bn0(x.view(-1, 28*28)))
    h = self.fc1(h)
    h = self.bin1(self.htanh1(self.bn1(h)))
    h = self.fc2(h)
    h = self.bin2(self.htanh2(self.bn2(h)))
    h = self.fc3(h)
    h = self.bin3(self.htanh3(self.bn3(h)))
    h = self.fc4(h)
    h = self.bin4(self.htanh4(self.bn4(h)))
    return self.logsoftmax(self.fc5(h))
|
GHData/hghimanshu_SegNet_PyTorch/segmentationNN.py: 114-138
def forward(self, x):
    """SegNet-style net: BN, then an encoder of conv stacks + max-pools,
    followed by a mirrored decoder of upsamples + deconv stacks."""
    h = self.batchNorm(x)
    encoder = (self.Convlayer1, self.Convlayer2, self.maxPool1,
               self.Convlayer3, self.Convlayer4, self.Convlayer5, self.maxPool2,
               self.Convlayer6, self.Convlayer7, self.maxPool3)
    decoder = (self.upSample1, self.DeConvlayer1, self.DeConvlayer2,
               self.upSample2, self.DeConvlayer3, self.DeConvlayer4,
               self.DeConvlayer5, self.upSample3, self.DeConvlayer6,
               self.DeConvlayer7)
    for layer in encoder + decoder:
        h = layer(h)
    return h
|
GHData/LongLong-Jing_PyTorch-UNet/network.py: 157-198
def forward(self, img):
    """U-Net: four down stages (128x128 -> 8x8), a center block, four up
    stages with skip concatenation, then the per-pixel classifier."""
    skips = []
    h = img
    for down, pool in ((self.down1, self.down1_pool),
                       (self.down2, self.down2_pool),
                       (self.down3, self.down3_pool),
                       (self.down4, self.down4_pool)):
        feat = down(h)
        skips.append(feat)
        h = pool(feat)
    h = self.center(h)
    for upsample, up, skip in ((self.upsample4, self.up4, skips[3]),
                               (self.upsample3, self.up3, skips[2]),
                               (self.upsample2, self.up2, skips[1]),
                               (self.upsample1, self.up1, skips[0])):
        h = up(torch.cat((skip, upsample(h)), 1))
    return self.classifier(h)
|
GHData/lembolov9_u-net-torch/model.py: 67-104
def forward(self, x):
    """U-Net: five encoder levels, four decoder levels with skip concatenation."""
    x1 = self.down_1(x)
    x2 = self.down_2(self.pool(x1))
    x3 = self.down_3(self.pool(x2))
    x4 = self.down_4(self.pool(x3))
    x5 = self.down_5(self.pool(x4))
    h = self.up_5(torch.cat((x4, self.de_up_5(x5)), dim=1))
    h = self.up_4(torch.cat((x3, self.de_up_4(h)), dim=1))
    h = self.up_3(torch.cat((x2, self.de_up_3(h)), dim=1))
    h = self.up_2(torch.cat((x1, self.de_up_2(h)), dim=1))
    return self.out(h)
|
GHData/VIPL-Audio-Visual-Speech-Understanding_LipNet-PyTorch/model.py: 59-93
def forward(self, x):
    """LipNet front-end + sequence model.

    Input x is a (B, C, T, H, W) video batch: three conv3d/ReLU/dropout/pool
    stages, then two bidirectional GRUs over time, a per-step FC, and a
    final permute back to batch-first.
    """
    x = self.conv1(x)
    x = self.relu(x)
    x = self.dropout3d(x)
    x = self.pool1(x)
    x = self.conv2(x)
    x = self.relu(x)
    x = self.dropout3d(x)
    x = self.pool2(x)
    x = self.conv3(x)
    x = self.relu(x)
    x = self.dropout3d(x)
    x = self.pool3(x)
    # (B, C, T, H, W) -> (T, B, C, H, W): the GRUs expect time-major input
    x = x.permute(2, 0, 1, 3, 4).contiguous()
    # (T, B, C, H, W) -> (T, B, C*H*W)  [original comment mislabeled the source shape]
    x = x.view(x.size(0), x.size(1), -1)
    # keep the GRU weights in one contiguous chunk (needed after replication,
    # e.g. under DataParallel)
    self.gru1.flatten_parameters()
    self.gru2.flatten_parameters()
    x, h = self.gru1(x)
    x = self.dropout(x)
    x, h = self.gru2(x)
    x = self.dropout(x)
    x = self.FC(x)
    # back to batch-first: (T, B, F) -> (B, T, F)
    x = x.permute(1, 0, 2).contiguous()
    return x
|
GHData/SethurajS_CNN_Architectures_in_PyTorch/GoogleNet.py: 32-60
def forward(self, x):
    """GoogLeNet: stem convs/pools, nine inception blocks, average pool,
    flatten, dropout, and the final classifier.

    Fix: removed two leftover ``print(x.shape)`` debug statements that
    spammed stdout on every forward pass.
    """
    x = self.conv1(x)
    x = self.maxpool_1(x)
    x = self.conv2(x)
    x = self.maxpool_2(x)
    x = self.inception_3a(x)
    x = self.inception_3b(x)
    x = self.maxpool_3(x)
    x = self.inception_4a(x)
    x = self.inception_4b(x)
    x = self.inception_4c(x)
    x = self.inception_4d(x)
    x = self.inception_4e(x)
    x = self.maxpool_4(x)
    x = self.inception_5a(x)
    x = self.inception_5b(x)
    x = self.avgpool(x)
    x = x.reshape(x.shape[0], -1)
    x = self.dropout(x)
    x = self.fc(x)
    return x
# Inception Block
|
GHData/bigmb_Unet-Segmentation-Pytorch-Nest-of-Unets/Models.py: 85-124
def forward(self, x):
    """U-Net encoder/decoder with skip concatenations; returns raw logits
    (the final activation stays disabled, as in the original design)."""
    e1 = self.Conv1(x)
    e2 = self.Conv2(self.Maxpool1(e1))
    e3 = self.Conv3(self.Maxpool2(e2))
    e4 = self.Conv4(self.Maxpool3(e3))
    e5 = self.Conv5(self.Maxpool4(e4))
    d = self.Up_conv5(torch.cat((e4, self.Up5(e5)), dim=1))
    d = self.Up_conv4(torch.cat((e3, self.Up4(d)), dim=1))
    d = self.Up_conv3(torch.cat((e2, self.Up3(d)), dim=1))
    d = self.Up_conv2(torch.cat((e1, self.Up2(d)), dim=1))
    return self.Conv(d)
|
GHData/bigmb_Unet-Segmentation-Pytorch-Nest-of-Unets/Models.py: 213-251
def forward(self, x):
    """Recurrent-residual U-Net: RRCNN encoder blocks, decoder with skip
    concatenations; returns raw logits (final activation disabled)."""
    e1 = self.RRCNN1(x)
    e2 = self.RRCNN2(self.Maxpool(e1))
    e3 = self.RRCNN3(self.Maxpool1(e2))
    e4 = self.RRCNN4(self.Maxpool2(e3))
    e5 = self.RRCNN5(self.Maxpool3(e4))
    d = self.Up_RRCNN5(torch.cat((e4, self.Up5(e5)), dim=1))
    d = self.Up_RRCNN4(torch.cat((e3, self.Up4(d)), dim=1))
    d = self.Up_RRCNN3(torch.cat((e2, self.Up3(d)), dim=1))
    d = self.Up_RRCNN2(torch.cat((e1, self.Up2(d)), dim=1))
    return self.Conv(d)
|
GHData/zhenming33_RAN_torch/PVANet.py: 197-225
def forward(self, input):
    """PVANet feature extractor.

    Fuses a downscaled mid-level map, the last conv4 map, and an upscaled
    deep map into one feature tensor. Shape comments assume a
    (32, 3, 224, 224) input.
    """
    h = self.pool1_1(self.conv1_1(input))                          # (32, 32, 37, 37)
    h = self.conv2_3(self.conv2_2(self.conv2_1(h)))                # (32, 64, 37, 37)
    h = self.scale3_1(h)                                           # (32, 64, 37, 37)
    h = self.conv3_4(self.conv3_3(self.conv3_2(self.conv3_1(h))))  # (32, 128, 19, 19)
    down = self.downscale(h)                                       # (32, 128, 10, 10)
    h = self.conv4_4(self.conv4_3(self.conv4_2(self.conv4_1(h))))  # (32, 256, 10, 10)
    mid = h
    h = self.conv5_4(self.conv5_3(self.conv5_2(self.conv5_1(h))))  # (32, 384, 5, 5)
    up = self.upscale(self.bsr(h))
    return self.convf(torch.cat((down, mid, up), 1))
|
GHData/zhoudaxia233_PyTorch-Unet/unet.py: 53-87
def forward(self, x):
    """U-Net with sigmoid output: four encoder levels, bottleneck, four
    decoder levels with skip concatenation, then conv9 + sigmoid."""
    c1 = self.conv1(x)
    c2 = self.conv2(self.maxpool(c1))
    c3 = self.conv3(self.maxpool(c2))
    c4 = self.conv4(self.maxpool(c3))
    h = self.bottleneck(self.maxpool(c4))
    h = self.conv5(torch.cat([self.up_conv5(h), c4], dim=1))
    h = self.conv6(torch.cat([self.up_conv6(h), c3], dim=1))
    h = self.conv7(torch.cat([self.up_conv7(h), c2], dim=1))
    h = self.conv8(torch.cat([self.up_conv8(h), c1], dim=1))
    return self.sigmoid(self.conv9(h))
|
GHData/JimpeiYamamoto_myTorch/myResNet50.py: 294-326
def _forward_impl(self, x):
# See note [TorchScript super()]
img, temp = x[0], x[1]
x = self.conv1(img)
x = self.bn1(x)
x = self.relu1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
#x = self.fc1(x)
'''
custom↓
'''
x = self.relu2(x)
x = self.fc1(x)
#reshapeの32はバッチサイズ
temp = temp.reshape(32, 1).float()
cat = torch.cat((x,temp),1)
x = self.fc_plus(cat)
x = self.relu3(x)
x = self.dropout1(x)
x = self.fc2(x)
x = self.dropout2(x)
x = self.bn2(x)
x = self.fc3(x)
return x
|
GHData/xiuyuan0216_GoogleNet-PyTorch/model.py: 51-79
def forward(self, x):
    """GoogLeNet with two auxiliary heads; returns (main, aux1, aux2) scores.

    aux1 taps the inception4a output and aux2 the inception4d output; both
    are computed unconditionally, matching the original.
    """
    h = self.maxpool1(self.conv1(x))
    h = self.maxpool2(self.conv3(self.conv2(h)))
    h = self.maxpool3(self.inception3b(self.inception3a(h)))
    h = self.inception4a(h)
    aux1 = self.aux1(h)
    h = self.inception4d(self.inception4c(self.inception4b(h)))
    aux2 = self.aux2(h)
    h = self.maxpool4(self.inception4e(h))
    h = self.avgpool(self.inception5b(self.inception5a(h)))
    h = self.dropout(h.view(-1, 1024*1*1))
    return self.fc(h), aux1, aux2
|
GHData/MIVRC_MLEFGN-PyTorch/mlegn.py: 141-176
def forward(self, x):
    """Multi-level edge-guided feature network.

    Returns (edge, out): the intermediate edge estimate from Edge_Net and
    the final reconstructed output.
    """
    noise_map = self.noise_head(x)
    edge = self.Edge_Net(x)
    edge_map = self.edge_head(edge)
    # feature encoding
    image_feature_1 = self.image_feature(noise_map)
    edge_feature_1 = self.edge_feature(edge_map)
    # first edge-guided fusion level
    leve_l = image_feature_1 + edge_feature_1
    image_feature_2 = self.image_rg_1(image_feature_1)
    edge_feature_2 = self.edge_rg_1(edge_feature_1)
    # second edge-guided fusion level
    leve_2 = image_feature_2 + edge_feature_2
    # NOTE(review): image_rg_2 is applied to edge_feature_2, NOT
    # image_feature_2 — the image-branch features are dropped at this level;
    # confirm this asymmetry is intentional.
    image_feature_3 = self.image_rg_2(edge_feature_2)
    edge_feature_3 = self.edge_rg_2(edge_feature_2)
    # third edge-guided fusion level
    leve_3 = image_feature_3 + edge_feature_3
    # pairwise fusion of adjacent levels, then of the two fused maps
    cat_1 = torch.cat([leve_l, leve_2], 1)
    cat_1 = self.fusion_1(cat_1)
    cat_1 = self.cat_rg_1(cat_1)
    cat_2 = torch.cat([leve_2, leve_3], 1)
    cat_2 = self.fusion_2(cat_2)
    cat_2 = self.cat_rg_2(cat_2)
    cat_3 = torch.cat([cat_1, cat_2], 1)
    cat_3 = self.fusion_3(cat_3)
    out = self.cat_rg_3(cat_3)
    out = self.tail(out)
    return edge, out
|
GHData/SiavashCS_torch_gan/calc_inception.py: 19-50
def forward(self, x):
    """Inception-v3 feature extractor: resizes to 299x299 if needed and
    returns the pooled (N, C) features."""
    if x.shape[2] != 299 or x.shape[3] != 299:
        x = F.interpolate(x, size=(299, 299), mode="bilinear", align_corners=True)
    # stem: 299x299x3 -> 147x147x32
    for layer in (self.Conv2d_1a_3x3, self.Conv2d_2a_3x3, self.Conv2d_2b_3x3):
        x = layer(x)
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # 73x73: 1x1 then 3x3 conv
    x = self.Conv2d_4a_3x3(self.Conv2d_3b_1x1(x))
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # mixed inception blocks down to 8x8x2048
    for layer in (self.Mixed_5b, self.Mixed_5c, self.Mixed_5d,
                  self.Mixed_6a, self.Mixed_6b, self.Mixed_6c, self.Mixed_6d,
                  self.Mixed_6e, self.Mixed_7a, self.Mixed_7b, self.Mixed_7c):
        x = layer(x)
    x = F.avg_pool2d(x, kernel_size=8)
    return x.view(x.shape[0], x.shape[1])
|
GHData/yoshua133_low-dose-pet/unet.py: 125-137
def forward(self, x):
    """Residual U-Net over a slice stack: predicts a correction added to the
    central slice. Returns (prediction, central slice)."""
    central = x[:, self.num_slice, :, :].view(x.shape[0], 1, x.shape[2], x.shape[3])
    e1 = self.inc(x)
    e2 = self.down1(e1)
    e3 = self.down2(e2)
    e4 = self.down3(e3)
    h = self.up1(e4, e3)
    h = self.up2(h, e2)
    h = self.up3(h, e1)
    h = self.act(self.outc(h))
    return torch.add(h, central), central
|
GHData/CalumMacLellan1995_U-net-PyTorch/unet.py: 22-33
def forward(self, x):
    """U-Net forward pass; returns per-pixel log-probabilities."""
    enc = [self.inc(x)]
    for down in (self.down1, self.down2, self.down3, self.down4):
        enc.append(down(enc[-1]))
    h = enc[-1]
    for up, skip in ((self.up1, enc[3]), (self.up2, enc[2]),
                     (self.up3, enc[1]), (self.up4, enc[0])):
        h = up(h, skip)
    return F.log_softmax(self.outc(h), dim=1)
|
GHData/hiyouga_Image-Segmentation-PyTorch/model.py: 20-33
def forward(self, x):
    """U-Net forward pass; returns per-pixel probabilities via sigmoid."""
    enc = [self.inc(x)]
    for down in (self.down1, self.down2, self.down3, self.down4):
        enc.append(down(enc[-1]))
    h = enc[-1]
    for up, skip in ((self.up1, enc[3]), (self.up2, enc[2]),
                     (self.up3, enc[1]), (self.up4, enc[0])):
        h = up(h, skip)
    return torch.sigmoid(self.outc(h))
|
GHData/tobyshooters_balloons/unet.py: 95-110
def forward(self, *args):
    """U-Net over channel-concatenated inputs; sigmoid output."""
    x = torch.cat(args, dim=1)
    xi = self.in_conv(x)
    feats = [xi]
    for down in (self.down1, self.down2, self.down3, self.down4):
        feats.append(down(feats[-1]))
    h = feats[4]
    for up, skip in ((self.up4, feats[3]), (self.up3, feats[2]),
                     (self.up2, feats[1]), (self.up1, feats[0])):
        h = up(h, skip)
    return torch.sigmoid(self.out_conv(h))
|
GHData/agrechnev_torch-fun1/my_mnist.py: 23-36
def forward(self, x):
    """Conv -> ReLU -> conv -> 2x2 max-pool -> dropout -> flatten -> FC head;
    returns per-class log-probabilities."""
    h = self.conv2(F.relu(self.conv1(x)))
    h = self.dropout1(F.max_pool2d(h, 2))
    h = torch.flatten(h, 1)
    h = self.dropout2(F.relu(self.fc1(h)))
    return F.log_softmax(self.fc2(h), dim=1)
########################################################################################################################
|
GHData/agrechnev_torch-fun1/my_cif10.py: 21-34
def forward(self, x):
    """Conv -> ReLU -> conv -> 2x2 max-pool -> dropout -> flatten -> FC head;
    returns per-class log-probabilities."""
    h = self.conv2(F.relu(self.conv1(x)))
    h = self.dropout1(F.max_pool2d(h, 2))
    h = torch.flatten(h, 1)
    h = self.dropout2(F.relu(self.fc1(h)))
    return F.log_softmax(self.fc2(h), dim=1)
########################################################################################################################
|
GHData/whzhangg_torch_examples/mlp.py: 93-104
def forward(self, x):
    """Apply every conv layer in order, pool, flatten, then a two-layer
    classifier with dropout; returns log-probabilities."""
    h = x
    for conv in self.convolutions:
        h = conv(h)
    h = torch.flatten(self.pooling(h), 1)
    h = self.dropout(F.relu(self.fc1(h)))
    return F.log_softmax(self.fc2(h), dim=1)
|
GHData/acholston_PyTorch_Exercises/Ex11-1b.py: 167-184
def forward(self, x):
    """Inception-style module: a 1x1 branch, two factorized towers (each split
    into two parallel tails), and a pooled branch; channel-concatenated."""
    b1 = self.branch1x1(x)
    t3 = self.branch3x3_1(x)
    t3 = torch.cat([self.branch3x3_2(t3), self.branch3x3_3(t3)], 1)
    t3b = self.branch3x3_2_3(self.branch3x3_2_2(self.branch3x3_2_1(x)))
    t3b = torch.cat([self.branch3x3_2_4(t3b), self.branch3x3_2_5(t3b)], 1)
    pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, padding=1, stride=1))
    return torch.cat([b1, t3, t3b, pooled], 1)
|
GHData/dyhan0920_PyramidNet-PyTorch/preresnet.py: 161-192
def forward(self, x):
    """Pre-activation ResNet head.

    CIFAR variant: conv stem, three stages, final BN/ReLU, pool, FC.
    ImageNet variant: BN/ReLU/max-pool stem, four stages, final BN/ReLU,
    pool, FC. Any other dataset value passes the input through unchanged
    (matching the original control flow).
    """
    if self.dataset == 'cifar10' or self.dataset == 'cifar100':
        h = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3):
            h = stage(h)
        h = self.avgpool(self.relu(self.bn1(h)))
        x = self.fc(h.view(h.size(0), -1))
    elif self.dataset == 'imagenet':
        h = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = self.avgpool(self.relu(self.bn2(h)))
        x = self.fc(h.view(h.size(0), -1))
    return x
|
GHData/Chaiyanchong_CutMix-PyTorch-master/pyramidnet.py: 197-229
def forward(self, x):
    """PyramidNet head.

    CIFAR variant: conv+BN stem, three stages. ImageNet variant:
    conv/BN/ReLU/max-pool stem, four stages. Both end with BN/ReLU,
    average pool, flatten, FC; other dataset values pass x through.
    """
    if self.dataset == 'cifar10' or self.dataset == 'cifar100':
        h = self.bn1(self.conv1(x))
        for stage in (self.layer1, self.layer2, self.layer3):
            h = stage(h)
        h = self.avgpool(self.relu_final(self.bn_final(h)))
        x = self.fc(h.view(h.size(0), -1))
    elif self.dataset == 'imagenet':
        h = self.maxpool(self.relu(self.bn1(self.conv1(x))))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = self.avgpool(self.relu_final(self.bn_final(h)))
        x = self.fc(h.view(h.size(0), -1))
    return x
|
GHData/clovaai_CutMix-PyTorch/pyramidnet.py: 197-229
def forward(self, x):
    """PyramidNet forward pass; the layer stack is selected by self.dataset.

    CIFAR-10/100: conv/bn stem (no ReLU before the stages), 3 stages,
    final BN+ReLU.  ImageNet: full stem with max-pool and 4 stages.
    Returns class logits from self.fc.
    """
    if self.dataset == 'cifar10' or self.dataset == 'cifar100':
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.bn_final(x)
        x = self.relu_final(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        x = self.fc(x)
    elif self.dataset == 'imagenet':
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.bn_final(x)
        x = self.relu_final(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
    # NOTE(review): any other dataset value returns the input unchanged
    return x
|
GHData/curaai_pix2pix-torch/network.py: 96-134
def forward(self, x):
    """U-Net style generator: 5-level encoder, decoder with skip concatenations."""
    # ---- encoder: two layers per level ----
    out0_0 = self.layer0_0(x)
    out0_1 = self.layer0_1(out0_0)
    out1_0 = self.layer1_0(out0_1)
    out1_1 = self.layer1_1(out1_0)
    out2_0 = self.layer2_0(out1_1)
    out2_1 = self.layer2_1(out2_0)
    out3_0 = self.layer3_0(out2_1)
    out3_2 = self.layer3_2(out3_0)
    out4_0 = self.layer4_0(out3_2)
    out4_2 = self.layer4_2(out4_0)
    # ---- decoder: concatenate with the matching encoder feature, then upsample ----
    # NOTE(review): the deepest skip concatenates a *pooled* out3_2 rather than a
    # same-level feature -- confirm this is intended.
    cat_out5_2 = torch.cat((out4_2, self.pooling(out3_2)), 1)
    dout4_0 = self.dlayer4_0(cat_out5_2)
    dout4_2 = self.dlayer4_2(dout4_0)
    cat_out4_2 = torch.cat((dout4_2, out3_2), 1)
    dout3_0 = self.dlayer3_0(cat_out4_2)
    dout3_2 = self.dlayer3_2(dout3_0)
    cat_out3_2 = torch.cat((dout3_2, out2_1), 1)
    dout2_0 = self.dlayer2_0(cat_out3_2)
    dout2_1 = self.dlayer2_1(dout2_0)
    cat_out2_1 = torch.cat((dout2_1, out1_1), 1)
    dout1_0 = self.dlayer1_0(cat_out2_1)
    dout1_1 = self.dlayer1_1(dout1_0)
    cat_out1_1 = torch.cat((dout1_1, out0_1), 1)
    dout0_0 = self.dlayer0_0(cat_out1_1)
    dout0_1 = self.dlayer0_1(dout0_0)
    return dout0_1
|
GHData/dyhan0920_PyramidNet-PyTorch/PyramidNet.py: 200-232
def forward(self, x):
    """PyramidNet forward pass; the layer stack is selected by self.dataset.

    CIFAR-10/100: conv/bn stem (no ReLU before the stages), 3 stages,
    final BN+ReLU.  ImageNet: full stem with max-pool and 4 stages.
    Returns class logits from self.fc.
    """
    if self.dataset == 'cifar10' or self.dataset == 'cifar100':
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.bn_final(x)
        x = self.relu_final(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, features)
        x = self.fc(x)
    elif self.dataset == 'imagenet':
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.bn_final(x)
        x = self.relu_final(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
    # NOTE(review): any other dataset value returns the input unchanged
    return x
|
GHData/bigmb_Unet-Segmentation-Pytorch-Nest-of-Unets/Models.py: 330-374
def forward(self, x):
    """Attention U-Net: 5-level conv encoder, attention-gated skips in the decoder."""
    # ---- encoder ----
    e1 = self.Conv1(x)
    e2 = self.Maxpool1(e1)
    e2 = self.Conv2(e2)
    e3 = self.Maxpool2(e2)
    e3 = self.Conv3(e3)
    e4 = self.Maxpool3(e3)
    e4 = self.Conv4(e4)
    e5 = self.Maxpool4(e4)
    e5 = self.Conv5(e5)
    #print(x5.shape)
    # ---- decoder: upsample, gate the skip with attention, concat, conv ----
    d5 = self.Up5(e5)
    #print(d5.shape)
    x4 = self.Att5(g=d5, x=e4)
    d5 = torch.cat((x4, d5), dim=1)
    d5 = self.Up_conv5(d5)
    d4 = self.Up4(d5)
    x3 = self.Att4(g=d4, x=e3)
    d4 = torch.cat((x3, d4), dim=1)
    d4 = self.Up_conv4(d4)
    d3 = self.Up3(d4)
    x2 = self.Att3(g=d3, x=e2)
    d3 = torch.cat((x2, d3), dim=1)
    d3 = self.Up_conv3(d3)
    d2 = self.Up2(d3)
    x1 = self.Att2(g=d2, x=e1)
    d2 = torch.cat((x1, d2), dim=1)
    d2 = self.Up_conv2(d2)
    # final 1-conv projection to the output channels
    out = self.Conv(d2)
    # out = self.active(out)
    return out
|
GHData/bigmb_Unet-Segmentation-Pytorch-Nest-of-Unets/Models.py: 418-461
def forward(self, x):
    """R2 Attention U-Net: recurrent-residual encoder, attention-gated decoder."""
    # ---- encoder (RRCNN = recurrent residual conv block) ----
    e1 = self.RRCNN1(x)
    e2 = self.Maxpool1(e1)
    e2 = self.RRCNN2(e2)
    e3 = self.Maxpool2(e2)
    e3 = self.RRCNN3(e3)
    e4 = self.Maxpool3(e3)
    e4 = self.RRCNN4(e4)
    e5 = self.Maxpool4(e4)
    e5 = self.RRCNN5(e5)
    # ---- decoder: upsample, attention-gate the skip, concat, RRCNN ----
    # (the encoder names e4..e1 are rebound to the gated features here)
    d5 = self.Up5(e5)
    e4 = self.Att5(g=d5, x=e4)
    d5 = torch.cat((e4, d5), dim=1)
    d5 = self.Up_RRCNN5(d5)
    d4 = self.Up4(d5)
    e3 = self.Att4(g=d4, x=e3)
    d4 = torch.cat((e3, d4), dim=1)
    d4 = self.Up_RRCNN4(d4)
    d3 = self.Up3(d4)
    e2 = self.Att3(g=d3, x=e2)
    d3 = torch.cat((e2, d3), dim=1)
    d3 = self.Up_RRCNN3(d3)
    d2 = self.Up2(d3)
    e1 = self.Att2(g=d2, x=e1)
    d2 = torch.cat((e1, d2), dim=1)
    d2 = self.Up_RRCNN2(d2)
    out = self.Conv(d2)
    # out = self.active(out)
    return out
# For the nested (UNet++) variants, 3 channels are required
|
GHData/tstandley_Xception-PyTorch/xception.py: 164-201
def forward(self, x):
    """Xception forward pass: stem convs, 12 separable-conv blocks, conv head, fc."""
    # stem: two conv/bn/relu layers
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    x = self.conv2(x)
    x = self.bn2(x)
    x = self.relu(x)
    # main trunk of separable-conv blocks
    x = self.block1(x)
    x = self.block2(x)
    x = self.block3(x)
    x = self.block4(x)
    x = self.block5(x)
    x = self.block6(x)
    x = self.block7(x)
    x = self.block8(x)
    x = self.block9(x)
    x = self.block10(x)
    x = self.block11(x)
    x = self.block12(x)
    # head: two more conv/bn/relu layers
    x = self.conv3(x)
    x = self.bn3(x)
    x = self.relu(x)
    x = self.conv4(x)
    x = self.bn4(x)
    x = self.relu(x)
    # global average pool to 1x1, flatten, classify
    x = F.adaptive_avg_pool2d(x, (1, 1))
    x = x.view(x.size(0), -1)
    x = self.fc(x)
    return x
|
GHData/Windxy_Classic_Network_PyTorch/InceptionV3.py: 229-295
def forward(self, x):
    """InceptionV3 forward pass; returns (logits, aux) when training with aux_logits."""
    # input: 299 x 299 x 3
    x = self.Conv2d_1a_3x3(x)
    # 149 x 149 x 32
    x = self.Conv2d_2a_3x3(x)
    # 147 x 147 x 32
    x = self.Conv2d_2b_3x3(x)
    # 147 x 147 x 64
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # 73 x 73 x 64
    x = self.Conv2d_3b_1x1(x)
    # 73 x 73 x 80
    x = self.Conv2d_4a_3x3(x)
    # 71 x 71 x 192
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # 35 x 35 x 192
    # three InceptionA blocks
    x = self.Mixed_5b(x)
    # InceptionA keeps the spatial size; channels = 224 + pool_features = 32
    # 35 x 35 x 256
    x = self.Mixed_5c(x)
    # InceptionA keeps the spatial size; channels = 224 + pool_features = 64
    # 35 x 35 x 288
    x = self.Mixed_5d(x)
    # InceptionA keeps the spatial size; channels = 224 + pool_features = 64
    # 35 x 35 x 288
    # one InceptionB block
    x = self.Mixed_6a(x)
    # InceptionB halves the spatial size and adds 480 channels
    # 17 x 17 x 768
    # four InceptionC blocks
    x = self.Mixed_6b(x)
    # InceptionC keeps the spatial size; 768 channels
    # 17 x 17 x 768
    x = self.Mixed_6c(x)
    # 17 x 17 x 768
    x = self.Mixed_6d(x)
    # 17 x 17 x 768
    x = self.Mixed_6e(x)
    # 17 x 17 x 768
    # the auxiliary classifier AuxLogits consumes the last InceptionC output
    if self.training and self.aux_logits:
        aux = self.AuxLogits(x)
    # 17 x 17 x 768
    # one InceptionD block
    x = self.Mixed_7a(x)
    # InceptionD halves the spatial size and adds 512 channels
    # 8 x 8 x 1280
    # two InceptionE blocks
    x = self.Mixed_7b(x)
    # InceptionE keeps the spatial size; 2048 channels
    # 8 x 8 x 2048
    x = self.Mixed_7c(x)
    # InceptionE keeps the spatial size; 2048 channels
    # 8 x 8 x 2048
    x = F.avg_pool2d(x, kernel_size=8)
    # 1 x 1 x 2048
    x = F.dropout(x, training=self.training)
    # 1 x 1 x 2048
    x = x.view(x.size(0), -1)
    # 2048
    x = self.fc(x)
    # 1000 (num_classes)
    if self.training and self.aux_logits:  # the auxiliary branch is only produced and returned during training
        return x, aux
    return x
|
GHData/shanemankiw_pydnet-torch/pydnet.py: 141-181
def forward(self, x):
    """PydNet decoder: coarse-to-fine disparity estimates at six scales."""
    pyramid = self.pyramid(x)
    # SCALE 6
    conv6 = self.conv6(pyramid[5])
    disp7 = self.disp7_layer(conv6)
    upconv6 = self.upconv6(conv6)
    # SCALE 5
    map5 = torch.cat([pyramid[4],upconv6],1)
    conv5 = self.conv5(map5)
    disp6 = self.disp6_layer(conv5)
    upconv5 = self.upconv5(conv5)
    # SCALE 4
    map4 = torch.cat([pyramid[3],upconv5],1)
    conv4 = self.conv4(map4)
    disp5 = self.disp5_layer(conv4)
    upconv4 = self.upconv4(conv4)
    # SCALE 3
    # NOTE(review): scales 3, 2 and 1 below all call self.upconv6 instead of
    # self.upconv3/upconv2/upconv1 -- this looks like a copy-paste bug; confirm
    # against the module definitions before relying on these scales.
    map3 = torch.cat([pyramid[2],upconv4],1)
    conv3 = self.conv3(map3)
    disp4 = self.disp4_layer(conv3)
    upconv3 = self.upconv6(conv3)
    # SCALE 2
    map2 = torch.cat([pyramid[1],upconv3],1)
    conv2 = self.conv2(map2)
    disp3 = self.disp3_layer(conv2)
    upconv2 = self.upconv6(conv2)
    # SCALE 1
    map1 = torch.cat([pyramid[0],upconv2],1)
    conv1 = self.conv1(map1)
    disp2 = self.disp2_layer(conv1)
    upconv1 = self.upconv6(conv1)
    return disp2, disp3, disp4, disp5, disp6, disp7
|
GHData/Windxy_Classic_Network_PyTorch/GoogLeNet.py: 47-99
def forward(self,x):
    """GoogLeNet forward pass; returns (logits, aux1, aux2) when training with aux_logits."""
    #N*3*224*224
    x = self.conv1(x)
    # N*64*112*112
    x = self.maxpool1(x)
    # N*64*56*56
    x = self.conv2(x)
    # N*64*56*56
    x = self.conv3(x)
    # N*192*56*56
    x = self.maxpool2(x)
    # N*192*28*28
    x = self.inception3a(x)
    # N*256*28*28
    x = self.inception3b(x)
    # N*480*28*28
    x = self.maxpool3(x)
    # N*480*14*14
    x = self.inception4a(x)
    # N*512*14*14
    if self.training and self.aux_logits: # auxiliary branch only during training
        aux1 = self.aux1(x)
    x = self.inception4b(x)
    # N*512*14*14
    x = self.inception4c(x)
    # N*512*14*14
    x = self.inception4d(x)
    # N*528*14*14
    if self.training and self.aux_logits: # auxiliary branch only during training
        aux2 = self.aux2(x)
    x = self.inception4e(x)
    # N*832*14*14
    x = self.maxpool4(x)
    # N*832*7*7
    x = self.inception5a(x)
    # N*832*7*7
    x = self.inception5b(x)
    # N*1024*7*7
    x = self.avgpool1(x)
    # N*1024*1*1
    x = torch.flatten(x,1)
    # N*1024
    x = self.dropout(x)
    x = self.fc(x)
    #N*1000(num_classes)
    if self.training and self.aux_logits: # auxiliary branches are only computed and returned during training
        return x,aux1,aux2
    return x
|
GHData/Kodamayuto2001_PyTorchAlexNet/test1.py: 259-293
def forward(self,x):
    """AlexNet-style forward: five conv/relu/bn stages (three pooled), fc head.

    Returns log-probabilities via log_softmax over the class dimension.
    """
    #print(x.size())
    x = self.conv1(x)
    x = torch.relu(x)
    x = self.bn1(x)
    #print(x.size())
    x = self.pool1(x)
    #print(x.size())
    x = self.conv2(x)
    x = torch.relu(x)
    x = self.bn2(x)
    #print(x.size())
    x = self.pool2(x)
    #print(x.size())
    x = self.conv3(x)
    x = torch.relu(x)
    x = self.bn3(x)
    x = self.conv4(x)
    x = torch.relu(x)
    x = self.bn4(x)
    x = self.conv5(x)
    x = torch.relu(x)
    x = self.bn5(x)
    x = self.pool3(x)
    #print(x.size())
    # flatten to the fc input size (384 channels of 6x6 maps)
    x = x.view(-1,384*6*6)
    x = self.fc1(x)
    x = torch.relu(x)
    x = self.fc2(x)
    x = torch.relu(x)
    x = self.fc3(x)
    return F.log_softmax(x,dim=1)
|
GHData/dongheehand_MemoPainter-PyTorch/generator.py: 38-81
def forward(self, x, color_feat):
    """Colorization generator: AdaIN-conditioned U-Net with tanh output.

    color_feat is mapped to AdaIN parameters that are assigned into
    self.layers before the encoder/decoder pass.
    """
    ### AdaIn params
    adain_params = self.mlp(color_feat)
    self.assign_adain_params(adain_params, self.layers)
    ### Encoder
    e1 = self.e1(x)
    e2 = self.e2(e1)
    e3 = self.e3(e2)
    e4 = self.e4(e3)
    e5 = self.e5(e4)
    e6 = self.e6(e5)
    e7 = self.e7(e6)
    e8 = self.e8(e7)
    ### Decoder
    # each decoder output is concatenated with the mirrored encoder feature
    d1_ = self.d1(e8)
    d1 = torch.cat([d1_, e7], dim = 1)
    d2_ = self.d2(d1)
    d2 = torch.cat([d2_, e6], dim = 1)
    d3_ = self.d3(d2)
    d3 = torch.cat([d3_, e5], dim = 1)
    d4_ = self.d4(d3)
    d4 = torch.cat([d4_, e4], dim = 1)
    d5_ = self.d5(d4)
    d5 = torch.cat([d5_, e3], dim = 1)
    d6_ = self.d6(d5)
    d6 = torch.cat([d6_, e2], dim = 1)
    d7_ = self.d7(d6)
    d7 = torch.cat([d7_, e1], dim = 1)
    d8 = self.d8(d7)
    output = self.tanh(d8)
    return output
|
GHData/Smorodov_PRNet_PyTorch_v2/resfcn256.py: 138-171
def forward(self, x):
    """ResFCN256: 11 encoder blocks down to 8x8, then 17 upsample blocks back to 256x256.

    NOTE(review): the final sigmoid is commented out here, so the output is
    unbounded -- confirm whether that is intended.
    """
    se = self.block0(x) # 256 x 256 x 16
    se = self.block1(se) # 128 x 128 x 32
    se = self.block2(se) # 128 x 128 x 32
    se = self.block3(se) # 64 x 64 x 64
    se = self.block4(se) # 64 x 64 x 64
    se = self.block5(se) # 32 x 32 x 128
    se = self.block6(se) # 32 x 32 x 128
    se = self.block7(se) # 16 x 16 x 256
    se = self.block8(se) # 16 x 16 x 256
    se = self.block9(se) # 8 x 8 x 512
    se = self.block10(se) # 8 x 8 x 512
    pd = self.upsample0(se) # 8 x 8 x 512
    pd = self.upsample1(pd) # 16 x 16 x 256
    pd = self.upsample2(pd) # 16 x 16 x 256
    pd = self.upsample3(pd) # 16 x 16 x 256
    pd = self.upsample4(pd) # 32 x 32 x 128
    pd = self.upsample5(pd) # 32 x 32 x 128
    pd = self.upsample6(pd) # 32 x 32 x 128
    pd = self.upsample7(pd) # 64 x 64 x 64
    pd = self.upsample8(pd) # 64 x 64 x 64
    pd = self.upsample9(pd) # 64 x 64 x 64
    pd = self.upsample10(pd) # 128 x 128 x 32
    pd = self.upsample11(pd) # 128 x 128 x 32
    pd = self.upsample12(pd) # 256 x 256 x 16
    pd = self.upsample13(pd) # 256 x 256 x 16
    pd = self.upsample14(pd) # 256 x 256 x 3
    pd = self.upsample15(pd) # 256 x 256 x 3
    pos = self.upsample16(pd) # 256 x 256 x 3
    #pos = self.sigmoid(pos)
    return pos
|
GHData/preduct0r_Kears_to_Torch/torch_cnn_model.py: 112-144
def forward(self, x, hidden):
    """Conv stack (as 1-d 'fc' layers) -> two LSTMs -> pooled linear head.

    Returns (output, hidden); hidden is the state after the second LSTM.
    """
    out = self.fc1(x)
    out = self.bn_1(out)
    out = self.relu1(out)
    out = self.mp1(out)
    out = self.fc2(out)
    out = self.bn_2(out)
    out = self.relu2(out)
    out = self.mp2(out)
    out = self.fc3(out)
    out = self.bn_3(out)
    out = self.relu3(out)
    out = self.mp3(out)
    out = self.fc4(out)
    out = self.bn_4(out)
    out = self.relu4(out)
    out = self.mp4(out)
    # (batch, channels, time) -> (batch, time, channels) for the LSTMs
    out = out.permute(0,2,1)
    # NOTE(review): the same incoming `hidden` object is fed to both LSTMs and
    # the first LSTM's state is discarded -- confirm this is intended.
    out, hidden = self.lstm1(out, hidden)
    out, hidden = self.lstm2(out, hidden)
    out = out.permute(0, 2, 1)
    out = self.gap(out).squeeze()
    out = self.dropout(out)
    out = self.linear(out)
    out = self.bn_final(out)
    return out, hidden
|
GHData/polyuxdq_U-Net-PyTorch/model.py: 83-120
def forward(self, in_data):
    """3-level U-Net: encoder with pooling, decoder with centre-cropped skips."""
    channel_dimension = 1  # concatenation happens along channels; check it
    # ---- encoder ----
    map0 = self.ec0(in_data)
    concat0 = self.ec1(map0)
    map1 = self.pool0(concat0)
    map2 = self.ec2(map1)
    concat1 = self.ec3(map2) # del
    map3 = self.pool1(concat1)
    map4 = self.ec4(map3)
    concat2 = self.ec5(map4)
    map5 = self.pool2(concat2)
    map6 = self.ec6(map5)
    out3 = self.ec7(map6)
    # ---- decoder: upsample, crop the skip to match, concatenate, conv ----
    up_map6 = self.up2(out3)
    concat2_crop = self.center_crop(concat2, up_map6)
    up_map6 = torch.cat((up_map6, concat2_crop), channel_dimension)
    up_map5 = self.dc6(up_map6)
    out2 = self.dc5(up_map5)
    up_map4 = self.up1(out2)
    concat1_crop = self.center_crop(concat1, up_map4)
    up_map4 = torch.cat((up_map4, concat1_crop), channel_dimension)
    up_map3 = self.dc4(up_map4)
    out1 = self.dc3(up_map3)
    up_map2 = self.up0(out1)
    concat0_crop = self.center_crop(concat0, up_map2)
    up_map2 = torch.cat((up_map2, concat0_crop), channel_dimension)
    up_map1 = self.dc2(up_map2)
    up_map0 = self.dc1(up_map1)
    out0 = self.dc0(up_map0)
    return out0 # out1, out2, out3
|
GHData/XueJiang16_ssl-torch/net.py: 167-203
def forward(self, x):
    """Custom ResNet trunk with an inline first residual block, then 4 stages.

    When self.classification is set, a three-layer fc head (with BN, ReLU and
    dropout) produces the final output; otherwise the pooled features are
    returned.
    """
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    # x = self.maxpool(x)
    # inline residual block: conv/bn/relu/dropout/conv plus a downsampled shortcut
    out = self.conv2(x)
    out = self.bn2(out)
    out = self.relu(out)
    out = self.dropout(out)
    out = self.conv3(out)
    residual = self.downsample(x)
    out += residual
    x = self.relu(out)
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    # x = self.layer5(x)
    x = self.bn_final(x)
    x = self.avgpool(x)
    x = x.view(x.size(0), -1)  # flatten to (batch, features)
    if self.classification:
        x = self.fc1(x)
        x = self.bn3(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.bn4(x)
        x = self.relu(x)
        x = self.dropout(x)
        x = self.fc3(x)
        # x = self.softmax(x)
    return x
|
GHData/chuanli11_WCT-PyTorch/ae.py: 453-485
def forward(self,x):
    """VGG-style decoder: reflection-pad/conv/relu stages with three unpooling steps.

    The final conv (conv19) has no activation, producing the reconstructed image.
    """
    # decoder
    out = self.reflecPad11(x)
    out = self.conv11(out)
    out = self.relu11(out)
    out = self.unpool(out)
    out = self.reflecPad12(out)
    out = self.conv12(out)
    out = self.relu12(out)
    out = self.reflecPad13(out)
    out = self.conv13(out)
    out = self.relu13(out)
    out = self.reflecPad14(out)
    out = self.conv14(out)
    out = self.relu14(out)
    out = self.reflecPad15(out)
    out = self.conv15(out)
    out = self.relu15(out)
    out = self.unpool2(out)
    out = self.reflecPad16(out)
    out = self.conv16(out)
    out = self.relu16(out)
    out = self.reflecPad17(out)
    out = self.conv17(out)
    out = self.relu17(out)
    out = self.unpool3(out)
    out = self.reflecPad18(out)
    out = self.conv18(out)
    out = self.relu18(out)
    out = self.reflecPad19(out)
    out = self.conv19(out)
    return out
|
GHData/MouxiaoHuang_myPRNet-PyTorch/ResFCN256.py: 127-160
def forward(self, x):
    """ResFCN256: 11 encoder blocks down to 8x8, 17 upsample blocks back to 256x256.

    The final sigmoid bounds the UV position map to (0, 1).
    """
    se = self.block0(x) # 256 x 256 x 16
    se = self.block1(se) # 128 x 128 x 32
    se = self.block2(se) # 128 x 128 x 32
    se = self.block3(se) # 64 x 64 x 64
    se = self.block4(se) # 64 x 64 x 64
    se = self.block5(se) # 32 x 32 x 128
    se = self.block6(se) # 32 x 32 x 128
    se = self.block7(se) # 16 x 16 x 256
    se = self.block8(se) # 16 x 16 x 256
    se = self.block9(se) # 8 x 8 x 512
    se = self.block10(se) # 8 x 8 x 512
    pd = self.upsample0(se) # 8 x 8 x 512
    pd = self.upsample1(pd) # 16 x 16 x 256
    pd = self.upsample2(pd) # 16 x 16 x 256
    pd = self.upsample3(pd) # 16 x 16 x 256
    pd = self.upsample4(pd) # 32 x 32 x 128
    pd = self.upsample5(pd) # 32 x 32 x 128
    pd = self.upsample6(pd) # 32 x 32 x 128
    pd = self.upsample7(pd) # 64 x 64 x 64
    pd = self.upsample8(pd) # 64 x 64 x 64
    pd = self.upsample9(pd) # 64 x 64 x 64
    pd = self.upsample10(pd) # 128 x 128 x 32
    pd = self.upsample11(pd) # 128 x 128 x 32
    pd = self.upsample12(pd) # 256 x 256 x 16
    pd = self.upsample13(pd) # 256 x 256 x 16
    pd = self.upsample14(pd) # 256 x 256 x 3
    pd = self.upsample15(pd) # 256 x 256 x 3
    pos = self.upsample16(pd) # 256 x 256 x 3
    pos = self.sigmoid(pos)
    return pos
|
GHData/ArnaudFickinger_PydNet-PyTorch/pydnet.py: 46-78
def forward(self, x):
    """PydNet: 6-level conv pyramid encoder, coarse-to-fine disparity decoder.

    Returns disparities [disp1..disp6] from finest to coarsest; self.disp and
    self.deconv are shared across all scales.
    """
    # ---- encoder pyramid ----
    conv1 = self.conv_ext_1(x)
    conv2 = self.conv_ext_2(conv1)
    conv3 = self.conv_ext_3(conv2)
    conv4 = self.conv_ext_4(conv3)
    conv5 = self.conv_ext_5(conv4)
    conv6 = self.conv_ext_6(conv5)
    # ---- decoder: at each scale, decode, emit disparity, upsample, concat ----
    conv6b = self.conv_dec_6(conv6)
    disp6 = self.disp(conv6b)
    conv6b = self.deconv(conv6b)
    concat5 = torch.cat((conv5, conv6b), 1)
    conv5b = self.conv_dec_5(concat5)
    disp5 = self.disp(conv5b)
    conv5b = self.deconv(conv5b)
    concat4 = torch.cat((conv4, conv5b), 1)
    conv4b = self.conv_dec_4(concat4)
    disp4 = self.disp(conv4b)
    conv4b = self.deconv(conv4b)
    concat3 = torch.cat((conv3, conv4b), 1)
    conv3b = self.conv_dec_3(concat3)
    disp3 = self.disp(conv3b)
    conv3b = self.deconv(conv3b)
    concat2 = torch.cat((conv2, conv3b), 1)
    conv2b = self.conv_dec_2(concat2)
    disp2 = self.disp(conv2b)
    conv2b = self.deconv(conv2b)
    concat1 = torch.cat((conv1, conv2b), 1)
    conv1b = self.conv_dec_1(concat1)
    disp1 = self.disp(conv1b)
    return [disp1, disp2, disp3, disp4, disp5, disp6]
|
GHData/zhenshen-mla_CIFAR10-in-PyTorch/models.py: 56-114
def forward(self, x):
    """VGG-style CIFAR-10 forward pass: 8 conv/bn/relu layers with 5 max-pools."""
    # [32, 3, 32, 32]
    x = self.conv1(x)
    x = self.bn1(x)
    x = self.relu(x)
    # [32, 64, 32, 32]
    x = self.maxpool(x)
    # [32, 64, 16, 16]
    # Note: a VGG hallmark (2014) is that conv layers preserve the spatial size;
    # only max-pooling halves H and W.
    # Repeated max-pooling can lose information; later architectures use pooling
    # sparingly or replace it with modified variants.
    x = self.conv2(x)
    x = self.bn2(x)
    x = self.relu(x)
    # [32, 128, 16, 16]
    x = self.maxpool(x)
    # [32, 128, 8, 8]
    x = self.conv3(x)
    x = self.bn3(x)
    x = self.relu(x)
    # [32, 256, 8, 8]
    x = self.conv4(x)
    x = self.bn4(x)
    x = self.relu(x)
    # [32, 256, 8, 8]
    x = self.maxpool(x)
    # [32, 256, 4, 4]
    x = self.conv5(x)
    x = self.bn5(x)
    x = self.relu(x)
    # [32, 256, 4, 4]
    x = self.conv6(x)
    x = self.bn6(x)
    x = self.relu(x)
    # [32, 256, 4, 4]
    x = self.maxpool(x)
    # [32, 512, 2, 2]
    x = self.conv7(x)
    x = self.bn7(x)
    x = self.relu(x)
    # [32, 512, 2, 2]
    x = self.conv8(x)
    x = self.bn8(x)
    x = self.relu(x)
    # [32, 512, 2, 2]
    x = self.maxpool(x)
    # [32, 512, 1, 1]
    # Reshape the BCHW feature map into (B, num_features) vectors.
    x = x.view(x.size(0), -1)
    # [32, 512]
    x = self.classifier(x)
    return x
# ResNet18
# When defining your own network, inherit from torch's Module class
|
GHData/MouxiaoHuang_myPRNet-PyTorch/ResNet10.py: 74-107
def forward(self, x):
    """ResNet10 forward: 10 encoder blocks, 16 decoder blocks, sigmoid output."""
    # encoder blocks
    out = self.block0(x)
    out = self.block2(out)
    out = self.block3(out)
    out = self.block4(out)
    out = self.block5(out)
    out = self.block6(out)
    out = self.block7(out)
    out = self.block8(out)
    out = self.block9(out)
    out = self.block10(out)
    # decoder blocks
    out = self.b1(out)
    out = self.b2(out)
    out = self.b3(out)
    out = self.b4(out)
    out = self.b5(out)
    out = self.b6(out)
    out = self.b7(out)
    out = self.b8(out)
    out = self.b9(out)
    out = self.b10(out)
    out = self.b11(out)
    out = self.b12(out)
    out = self.b13(out)
    out = self.b14(out)
    out = self.b15(out)
    out = self.b16(out)
    # bound the output to (0, 1)
    out = self.sigmoid(out)
    return out
#net = ResNet10()
#print(net)
|
GHData/antonyvigouret_Text-Recognition-PyTorch/crnn.py: 36-86
def forward(self, x):
    """CRNN forward: conv feature extractor, bidirectional LSTMs, linear head.

    Returns per-timestep class log-probabilities of shape (T, batch, nclass).
    """
    # (N, 3, 32, W) -> (N, 64, 16, W/2)
    out = self.conv1(x)
    out = self.relu(out)
    out = self.mp1(out)
    # (N, 64, 16, W/2) -> (N, 128, 8, W/4)
    out = self.conv2(out)
    out = self.relu(out)
    out = self.mp2(out)
    # (N, 128, 8, W/4) -> (N, 256, 8, W/4)
    out = self.conv3(out)
    out = self.relu(out)
    # (N, 256, 8, W/4) -> (N, 256, 4, W/4)
    out = self.conv4(out)
    out = self.relu(out)
    out = self.mp4(out)
    # (N, 256, 4, W/4) -> (N, 512, 4, W/4)
    out = self.conv5(out)
    out = self.bn5(out)
    out = self.relu(out)
    # (N, 512, 4, W/4) -> (N, 512, 2, W/4)
    out = self.conv6(out)
    out = self.bn6(out)
    out = self.relu(out)
    out = self.mp6(out)
    # (N, 512, 2, W/4) -> (N, 512, 1, W/4-)
    out = self.conv7(out)
    out = self.relu(out)
    # (N, 512, 1, W/4-) -> (N, 512, W/4-)
    out = torch.squeeze(out, dim=2)
    # (t, n, 512)
    out = out.permute(2, 0, 1)
    # (N, W/4, 512) -> (N, W/4, 512)
    out, _ = self.bidiLSTMs(out)
    # (N, W/4, 512) -> (N*(W/4), nclass)
    T, b, h = out.size()
    out = out.view(T * b, h)
    out = self.linear(out)
    out = out.view(T, b, -1)
    out = F.log_softmax(out, dim=2)
    return out
|
GHData/acholston_PyTorch_Exercises/Ex11-1a.py: 198-215
def forward(self, x):
    """Inception block: 1x1, split 3x3, deeper split 3x3 and pool branches."""
    out_1x1 = self.branch1x1(x)
    # 3x3 branch splits into two parallel heads
    tail = self.branch3x3_1(x)
    out_3x3 = torch.cat([self.branch3x3_2(tail), self.branch3x3_3(tail)], 1)
    # deeper 3x3 branch with its own split head
    tail_dbl = self.branch3x3_2_1(x)
    tail_dbl = self.branch3x3_2_2(tail_dbl)
    out_dbl = torch.cat([self.branch3x3_2_3(tail_dbl), self.branch3x3_2_4(tail_dbl)], 1)
    # average-pool branch
    pooled = self.branch_pool(F.avg_pool2d(x, kernel_size=3, padding=1, stride=1))
    # concatenate every branch along the channel dimension
    return torch.cat([out_1x1, out_3x3, out_dbl, pooled], 1)
|
GHData/acholston_PyTorch_Exercises/Ex11-2.py: 40-62
def forward(self, x):
    """Residual block: two conv/BN layers plus an optionally-projected shortcut."""
    shortcut = x
    # first conv layer with post-BN ReLU
    x = F.relu(self.b_norm1(self.conv1(x)))
    # second conv layer, no activation before the residual add
    x = self.b_norm2(self.conv2(x))
    # project the shortcut when the dimensionality changes
    if self.convr is not None:
        shortcut = self.b_normr(self.convr(shortcut))
    x += shortcut
    return F.relu(x)
|
GHData/G-U-N_a-PyTorch-Tutorial-to-Class-Incremental-Learning/resnet.py: 40-55
def forward(self, x):
    """Basic residual block: conv-bn-relu, conv-bn, shortcut add, final relu."""
    shortcut = x
    out = self.conv_a(x)
    out = self.bn_a(out)
    out = F.relu(out, inplace=True)
    out = self.conv_b(out)
    out = self.bn_b(out)
    # downsample the shortcut when shapes differ
    if self.downsample is not None:
        shortcut = self.downsample(x)
    return F.relu(shortcut + out, inplace=True)
|
GHData/acholston_PyTorch_Exercises/Ex13-2.py: 68-90
def forward(self, x):
    """Two-layer conv residual block with an optional projection shortcut."""
    identity = x
    out = self.conv1(x)
    out = F.relu(self.b_norm1(out))
    out = self.conv2(out)
    out = self.b_norm2(out)
    # reduce the identity path when the block changes dimensionality
    if self.convr is not None:
        identity = self.b_normr(self.convr(identity))
    # residual addition followed by the output activation
    out += identity
    out = F.relu(out)
    return out
|
GHData/y3sar_mushroom_torch/nn.py: 101-114
def forward(self, x):
    """Five stacked linear layers; sigmoid then batch-norm after each hidden one."""
    h = self.bn1(self.sig(self.l1(x)))
    h = self.bn2(self.sig(self.l2(h)))
    h = self.bn3(self.sig(self.l3(h)))
    h = self.bn4(self.sig(self.l4(h)))
    # final layer returns raw outputs
    return self.l5(h)
|
GHData/Advaiit_pyTorch_nn/s_cnn.py: 16-33
def forward(self, x):
    """Two conv+relu+pool stages, flatten, three fully connected layers.

    Returns raw class scores (no softmax).
    """
    x = self.conv1(x)
    x = F.relu(x)
    x = F.max_pool2d(x, (2, 2))
    # BUG FIX: F.max_pool2d requires a kernel_size argument; the original call
    # omitted it and raised a TypeError at runtime.  (2, 2) mirrors the first
    # pooling stage.
    x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
    x = x.view(-1, self.num_flat_features(x))
    x = self.fc1(x)
    x = F.relu(x)
    x = F.relu(self.fc2(x))
    x = self.fc3(x)
    return x
|
GHData/grassknoted_PyTorch-Autoencoder/Autoencoder.py: 160-171
def forward(self, z):
    """Decode latent z into a (N, 1, 256, 256) image with values in (0, 1)."""
    h = self.linear(z)
    h = h.view(z.size(0), 512, 1, 1)
    # blow the 1x1 latent map up to 16x16 before the conv stack
    h = F.interpolate(h, scale_factor=16)
    h = self.layer4(h)
    h = self.layer3(h)
    h = self.layer2(h)
    h = self.layer1(h)
    h = torch.sigmoid(self.conv1(h))
    return h.view(h.size(0), 1, 256, 256)
|
GHData/julianstastny_VAE-ResNet18-PyTorch/model.py: 132-143
def forward(self, z):
    """Decode latent z into a (N, 3, 64, 64) image with values in (0, 1)."""
    h = self.linear(z)
    h = h.view(z.size(0), 512, 1, 1)
    # upsample the 1x1 latent map to 4x4 before the conv stack
    h = F.interpolate(h, scale_factor=4)
    h = self.layer4(h)
    h = self.layer3(h)
    h = self.layer2(h)
    h = self.layer1(h)
    h = torch.sigmoid(self.conv1(h))
    return h.view(h.size(0), 3, 64, 64)
|
GHData/samx97_MushroomTorch/nn.py: 101-114
def forward(self, x):
    """MLP of five linear layers.

    Each hidden layer is followed by a sigmoid activation and then batch
    normalization; the final layer returns raw outputs.
    """
    a = self.sig(self.l1(x))
    a = self.bn1(a)
    a = self.sig(self.l2(a))
    a = self.bn2(a)
    a = self.sig(self.l3(a))
    a = self.bn3(a)
    a = self.sig(self.l4(a))
    a = self.bn4(a)
    return self.l5(a)
|
GHData/s-chh_PyTorch-DANN/model.py: 31-48
def forward(self, x):
    """Feature extractor: three conv-conv-pool stages, flatten, one fc layer."""
    h = self.pool3(F.relu(self.conv2(F.relu(self.conv1(x)))))
    h = self.pool6(F.relu(self.conv5(F.relu(self.conv4(h)))))
    h = self.pool9(F.relu(self.conv8(F.relu(self.conv7(h)))))
    # flatten per-sample to the fc input width
    h = h.view(-1, self.flat_dim)
    return F.relu(self.fc1(h))
|
GHData/mrGreat1110_torch-image-classification/image_cla.py: 129-144
def forward(self,x):
    """Three conv-conv-pool stages, flatten, dropout fc head with softmax output."""
    out = self.maxpool1(self.a2(self.cv2(self.a1(self.cv1(x)))))
    out = self.maxpool2(self.a4(self.cv4(self.a3(self.cv3(out)))))
    out = self.maxpool3(self.a6(self.cv6(self.a5(self.cv5(out)))))
    out = self.flat1(out)
    out = self.drop(self.a7(self.fc1(out)))
    # softmax over classes
    return self.sm(self.fc2(out))
|
GHData/jiayongzhang_pred_CBED_PyTorch/run.py: 119-138
def forward(self, x):
    """Three conv-conv-pool stages, flatten, dropout, three fc layers.

    Returns raw class scores (no softmax).
    """
    x = F.relu(self.conv1(x))
    x = F.relu(self.conv2(x))
    x = self.pool(x)
    x = F.relu(self.conv3(x))
    x = F.relu(self.conv4(x))
    x = self.pool(x)
    x = F.relu(self.conv5(x))
    x = F.relu(self.conv6(x))
    x = self.pool(x)
    # flatten: 16 channels of 25x25 maps
    x = x.view(-1,16 * 25 * 25)
    x = self.dropout(x)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    #x = F.relu(self.fc3(x))
    x = self.fc3(x)
    return x
|
GHData/omerunlusoy_Computer-Vision-Practices-with-PyTorch/Convolutional_Neural_Networks.py: 203-214
def forward(self, x):
    """Two conv+pool stages then a dropout-regularised fc head.

    Returns raw class scores, as expected by nn.CrossEntropyLoss.
    """
    out = Functional.max_pool2d(Functional.relu(self.conv1(x)),
                                pooling_kernel_size, pooling_kernel_size)
    out = Functional.max_pool2d(Functional.relu(self.conv2(out)),
                                pooling_kernel_size, pooling_kernel_size)
    # flatten before the fully connected layers
    out = out.view(-1, fc1_input_size)
    out = self.dropout1(Functional.relu(self.fc1(out)))
    return self.fc2(out)
|
GHData/NanWuIT_Gradient-Descent-and-PyTorch/Gradient%20Descent.py: 69-85
def forward(self, x):
    """conv+pool twice, flatten, two fc layers, log-softmax over classes."""
    h = self.maxp(F.relu(self.conv1(x)))
    h = self.maxp(F.relu(self.conv2(h)))
    h = h.view(h.shape[0], -1)       # flatten each sample
    h = F.relu(self.fc1(h))
    h = self.fc2(h)
    return F.log_softmax(h, dim = 1)
|
GHData/AnuExMachina_PyTorch-Titan/titan.py: 59-71
def forward(self, x):
    """Five GELU dense layers with dropout; sigmoid output in (0, 1).

    Fix: F.sigmoid is deprecated (and removed in modern PyTorch); the
    numerically identical Tensor.sigmoid() is used instead, which needs no
    extra import.
    """
    x = F.gelu(self.dense1(x))
    x = self.dropout1(x)
    x = F.gelu(self.dense2(x))
    x = self.dropout2(x)
    x = F.gelu(self.dense3(x))
    x = self.dropout3(x)
    x = F.gelu(self.dense4(x))
    x = self.dropout4(x)
    x = self.dense5(x).sigmoid()
    return x
|
GHData/aditya12agd5_pytorch_divcolor/vae.py: 69-79
def cond_encoder(self, x):
    """Encode x through four conv+BN stages; return the four scale features."""
    feat = F.relu(self.cond_enc_conv1(x))
    sc_feat32 = self.cond_enc_bn1(feat)
    feat = F.relu(self.cond_enc_conv2(sc_feat32))
    sc_feat16 = self.cond_enc_bn2(feat)
    feat = F.relu(self.cond_enc_conv3(sc_feat16))
    sc_feat8 = self.cond_enc_bn3(feat)
    feat = F.relu(self.cond_enc_conv4(sc_feat8))
    sc_feat4 = self.cond_enc_bn4(feat)
    # coarsest-to-finest skip features for the decoder
    return sc_feat32, sc_feat16, sc_feat8, sc_feat4
|
GHData/KnurpsBram_PyTorch-PatternNet/networks.py: 68-79
def forward(self, x):
    """Four conv layers (first two pooled), flatten, two dense layers."""
    n = x.size(0)
    h = self.mp(self.a(self.conv1(x)))
    h = self.mp(self.a(self.conv2(h)))
    h = self.a(self.conv3(h))
    h = self.a(self.conv4(h))
    # flatten per sample before the dense layers
    h = self.a(self.dense1(h.view(n, -1)))
    return self.dense2(h)
|
GHData/muratonuryildirim_PyTorch_Notes/12_LeNet_from_scratch.py: 24-35
def forward(self, x):
    """LeNet-style pass: three conv stages (first two pooled), two linear layers."""
    out = self.pool(self.relu(self.conv1(x)))
    out = self.pool(self.relu(self.conv2(out)))
    out = self.relu(self.conv3(out))
    out = out.reshape(out.shape[0], -1)   # flatten each sample
    out = self.relu(self.linear1(out))
    return self.linear2(out)
|
GHData/Ravitha_pyTorch_Examples/Ex6_MNISTClassification.py: 26-36
def forward(self,x):
    """Two conv+pool stages, flatten, three linear layers; returns raw scores."""
    feat = self.pool1(F.relu(self.conv1(x)))
    feat = self.pool2(F.relu(self.conv2(feat)))
    flat = torch.nn.Flatten()(feat)
    hidden = F.relu(self.linear1(flat))
    hidden = F.relu(self.linear2(hidden))
    return self.linear3(hidden)
|
GHData/IVPLatNU_Sample_PyTorch_Code/model.py: 90-107
def forward(self, input):
    """Conv stem, inner conv stack, conv head, then three fully connected layers."""
    output = self.relu(self.conv_in(input))
    output = self.inner_convs(output)
    output = self.relu(self.conv_out(output))
    '''
    When you switch from convolutions to fully connected layers, PyTorch requires
    that the shape of the data be adjusted (flattened).
    '''
    output = output.view(-1, self.output_size * self.output_size)
    output = self.inner_fc1(output)
    output = self.inner_fc2(output)
    output = self.fc_out(output)
    return output
|
GHData/dourgey_Reinforcement-Learning-Implements-With-PyTorch/NNModels.py: 17-28
def forward(self, x):
    """MLP Q-network; optional dueling head combining value and advantage streams."""
    x = torch.relu(self.fc1(x))
    x = torch.relu(self.fc2(x))
    if self.use_dueling:
        q = self.action(x)  # advantage stream
        v = self.v(x)       # state-value stream
        # NOTE(review): torch.mean(q) averages over ALL elements (batch and
        # action dimensions); dueling DQN typically subtracts the per-sample
        # mean (dim=1, keepdim=True) -- confirm this is intended.
        action = v + (q - torch.mean(q))
    else:
        action = self.action(x)
    return action
|
GHData/agrechnev_torch-fun1/sarah1.py: 18-28
def forward(self, x):
    """Two conv+pool stages, flatten, three fully connected layers."""
    h = self.mp1(F.relu(self.conv1(x)))
    h = self.mp2(F.relu(self.conv2(h)))
    # flatten per sample using the helper that counts flat features
    h = h.view(-1, self.num_flat(h))
    h = F.relu(self.fc1(h))
    h = F.relu(self.fc2(h))
    return self.fc3(h)
|
GHData/agrechnev_torch-fun1/torch4.py: 78-88
def forward(self, x):
    """LeNet-ish CIFAR net: conv/pool twice, then three fully connected layers."""
    out = self.pool(F.relu(self.conv1(x)))
    out = self.pool(F.relu(self.conv2(out)))
    # flatten: 16 channels of 5x5 maps
    out = out.view(-1, 16 * 5 * 5)
    out = F.relu(self.fc1(out))
    out = F.relu(self.fc2(out))
    return self.fc3(out)
|
GHData/tszdanger_torch_grammartest/antorbee.py: 50-61
def forward(self,x):
    """conv/pool twice, flatten, fc with dropout, log-softmax class scores."""
    x = F.relu(self.conv1(x))
    x = self.pool(x)
    x = F.relu(self.conv2(x))
    x = self.pool(x)
    # flatten using the module-level image_size/depth constants
    x = x.view(-1,image_size//4*image_size//4*depth[1])
    x = F.relu(self.fc1(x))
    # dropout only active in training mode
    x = F.dropout(x,training=self.training)
    x = self.fc2(x)
    x = F.log_softmax(x,dim=1)
    return x
|
GHData/frozenparadox99_pyTorch-Course/cnn.py: 52-62
def forward(self, x):
    """Two conv+pool stages, flatten, dropout-regularised two-layer head."""
    out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
    out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
    # flatten: 50 channels of 4x4 maps
    out = out.view(-1, 4 * 4 * 50)
    out = self.dropout1(F.relu(self.fc1(out)))
    return self.fc2(out)
|
GHData/AnuExMachina_PyTorch-Ligtning-Titan/titan%20lightning.py: 61-71
def forward(self, x):
    """Five GELU dense layers with dropout; sigmoid output in (0, 1).

    Fix: F.sigmoid is deprecated (and removed in modern PyTorch); the
    numerically identical Tensor.sigmoid() is used instead, which needs no
    extra import.
    """
    x = F.gelu(self.dense1(x))
    x = self.dropout1(x)
    x = F.gelu(self.dense2(x))
    x = self.dropout2(x)
    x = F.gelu(self.dense3(x))
    x = self.dropout3(x)
    x = F.gelu(self.dense4(x))
    x = self.dropout4(x)
    x = self.dense5(x).sigmoid()
    return x
|
GHData/ShanksVision_PyTorch_DLCV/MnistCNN.py: 25-35
def forward(self, x):
    """Two conv+pool stages then a dropout fc head; returns logits."""
    h = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
    h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)
    h = h.view(-1, 1024)   # flatten to the fc input width
    h = self.drop1(F.relu(self.fc1(h)))
    return self.fc2(h)
|
GHData/Amitahu_MNIST-with-PyTorch/model.py: 16-27
def forward(self, x):
    """Two convs, pool+dropout, flatten, dropout fc head, log-softmax output."""
    h = F.relu(self.conv1(x))
    h = F.relu(self.conv2(h))
    h = self.dropout1(F.max_pool2d(h, 2))
    h = torch.flatten(h, 1)
    h = self.dropout2(F.relu(self.fc1(h)))
    h = self.fc2(h)
    # log-probabilities over classes
    return F.log_softmax(h, dim=1)
|
GHData/HadasBabayov_PyTorch-neural-network/ex4.py: 123-136
def forward(self, x):
    """Four-layer MLP with BN, ReLU and dropout; log-softmax class scores."""
    x = x.view(-1, self.image_size)  # flatten the input image
    x = self.first_layer(x)
    x = F.relu(self.bn1(x))
    x = self.dropout1(x)
    x = self.second_layer(x)
    x = F.relu(self.bn2(x))
    x = self.dropout2(x)
    x = self.third_layer(x)
    x = F.relu(self.bn3(x))
    x = self.fourth_layer(x)
    return F.log_softmax(x, dim=1)
# Train the model.
|
GHData/ethandrzb_UCF101-PyTorch/Model_C3D.py: 42-72
def forward(self, x):
    """C3D forward pass: five 3-d conv stages, then a dropout fc head.

    Returns raw class logits.
    """
    x = self.relu(self.conv1(x))
    x = self.pool1(x)
    x = self.relu(self.conv2(x))
    x = self.pool2(x)
    x = self.relu(self.conv3a(x))
    x = self.relu(self.conv3b(x))
    x = self.pool3(x)
    x = self.relu(self.conv4a(x))
    x = self.relu(self.conv4b(x))
    x = self.pool4(x)
    x = self.relu(self.conv5a(x))
    x = self.relu(self.conv5b(x))
    x = self.pool5(x)
    # flatten to the fc6 input width
    x = x.view(-1, 8192)
    x = self.relu(self.fc6(x))
    x = self.dropout(x)
    x = self.relu(self.fc7(x))
    x = self.dropout(x)
    logits = self.fc8(x)
    return logits
|
GHData/ethandrzb_UCF101-PyTorch/Model.py: 57-87
def forward(self, x):
    """C3D forward pass: five 3-d conv stages, then a dropout fc head.

    Returns raw class logits.
    """
    x = self.relu(self.conv1(x))
    x = self.pool1(x)
    x = self.relu(self.conv2(x))
    x = self.pool2(x)
    x = self.relu(self.conv3a(x))
    x = self.relu(self.conv3b(x))
    x = self.pool3(x)
    x = self.relu(self.conv4a(x))
    x = self.relu(self.conv4b(x))
    x = self.pool4(x)
    x = self.relu(self.conv5a(x))
    x = self.relu(self.conv5b(x))
    x = self.pool5(x)
    # flatten to the fc6 input width
    x = x.view(-1, 8192)
    x = self.relu(self.fc6(x))
    x = self.dropout(x)
    x = self.relu(self.fc7(x))
    x = self.dropout(x)
    logits = self.fc8(x)
    return logits
|
GHData/pei-hsin-chiu_PyTorchTutorials/vae_2digits_4levels_MSE.py: 66-91
def forward(self, x):
    """VAE encoder: five conv layers with dropout, then mu/logvar heads.

    When gradients are enabled but the module is in eval mode, a hook is
    registered on conv5's output to capture its gradient (self.activations_hook),
    e.g. for Grad-CAM style inspection.
    Returns (x_mu, x_logvar).
    """
    x = F.relu(self.conv1(x))
    x = self.dropout(x)
    x = F.relu(self.conv2(x))
    x = self.dropout(x)
    x = F.relu(self.conv3(x))
    x = self.dropout(x)
    x = F.relu(self.conv4(x))
    x = self.dropout(x)
    if (x.requires_grad == True) and (self.training == False):
        x = self.conv5(x)
        # hook the gradient
        g = x.register_hook(self.activations_hook)
        x = F.relu(x)
    else:
        x = F.relu(self.conv5(x))
    # flatten batch of multi-channel feature maps to a batch of feature vectors
    x = x.view(x.size(0), -1)
    #x = F.relu(self.fc2(x))
    x_mu = self.fc_mu(x)
    x_logvar = self.fc_logvar(x)
    return x_mu, x_logvar
|
GHData/ShanksVision_PyTorch_DLCV/CIFAR10CNN.py: 35-54
def forward(self, x):
    """CIFAR-10 CNN: conv/pool/BN stages, then a dropout fc head; returns logits."""
    x = F.relu(self.conv1(x))
    x = F.max_pool2d(x, 2, 2)
    x = self.bn1(x)
    x = F.relu(self.conv2(x))
    x = F.max_pool2d(x, 2, 2)
    x = self.bn2(x)
    x = F.relu(self.conv3(x))
    x = F.max_pool2d(x, 2, 2)
    x = self.bn3(x)
    # NOTE(review): this stage pools and normalises without any conv in
    # between -- confirm a conv4 was not accidentally omitted.
    x = F.max_pool2d(x, 2, 2)
    x = self.bn4(x)
    x = x.view(-1, 1024)  # flatten to the fc input width
    x = F.relu(self.fc1(x))
    x = self.drop1(x)
    x = F.relu(self.fc2(x))
    x = self.drop2(x)
    logits = self.fc3(x)
    return logits
|
GHData/BizhuWu_C3D_PyTorch/C3D.py: 43-72
def forward(self, x):
    """C3D forward pass: five 3-d conv stages, then a dropout fc head.

    Returns raw class logits.
    """
    x = self.relu(self.conv1(x))
    x = self.pool1(x)
    x = self.relu(self.conv2(x))
    x = self.pool2(x)
    x = self.relu(self.conv3a(x))
    x = self.relu(self.conv3b(x))
    x = self.pool3(x)
    x = self.relu(self.conv4a(x))
    x = self.relu(self.conv4b(x))
    x = self.pool4(x)
    x = self.relu(self.conv5a(x))
    x = self.relu(self.conv5b(x))
    x = self.pool5(x)
    # flatten to the fc6 input width
    x = x.view(-1, 8192)
    x = self.relu(self.fc6(x))
    x = self.dropout(x)
    x = self.relu(self.fc7(x))
    x = self.dropout(x)
    logits = self.fc8(x)
    return logits
|
GHData/yawen-d_Generative_Adversarial_Networks-in-PyTorch/A6_DwG.py: 113-134
def forward(self, x):
    """DCGAN-style generator: project with fc1, reshape to (N, 196, 4, 4),
    run seven conv stages each followed by BN+ReLU, then conv8 + tanh.

    NOTE(review): the same `self.bn` module is reused after every conv —
    reproduced exactly as in the original.
    """
    x = self.fc1(x).view((-1, 196, 4, 4))
    for deconv in (self.conv1, self.conv2, self.conv3, self.conv4,
                   self.conv5, self.conv6, self.conv7):
        x = F.relu(self.bn(deconv(x)))
    return torch.tanh(self.conv8(x))
|
GHData/chakam1307_Image-classification-using-PyTorch/model.py: 153-171
def forward(self, desc):
    """Descriptor head: two conv/conv/BN/pool stages, flatten, then three
    stacked fully-connected layers (no activations between the FCs)."""
    feats = self.pool(self.bn1(self.relu(self.conv5_2(self.relu(self.conv5_1(desc))))))
    feats = self.pool(self.bn2(self.relu(self.conv6_2(self.relu(self.conv6_1(feats))))))
    flat = torch.flatten(feats, 1)
    return self.fc3(self.fc2(self.fc1(flat)))
|
GHData/PhilippKitz_PyTorch/mnist.py: 36-49
def forward(self, x):
    """MNIST CNN forward: two conv/pool/ReLU stages (dropout after conv2),
    flatten to 320 features, fc1+ReLU, fc2, log-softmax over classes.

    Returns log-probabilities of shape (batch, num_classes).
    """
    x = self.conv1(x)
    x = F.max_pool2d(x, 2)
    x = F.relu(x)
    x = self.conv2(x)
    x = self.conv_dropaut(x)  # (sic) attribute name defined elsewhere; kept as-is
    x = F.max_pool2d(x, 2)
    x = F.relu(x)
    x = x.view(-1, 320)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    # FIX: F.log_softmax without `dim` is deprecated and relies on an
    # implicit-dimension heuristic; class scores live along dim 1.
    return F.log_softmax(x, dim=1)
|
GHData/BenjaminAm_PyTorch-Tutorial/Tutorial3Cats&Dogs.py: 65-79
def forward(self, x):
    """Cats-vs-dogs CNN: three conv/pool/ReLU stages, flatten to 14112
    features, fc1+ReLU, fc2, sigmoid output (binary probability)."""
    x = self.conv1(x)
    x = F.max_pool2d(x, 2)
    x = F.relu(x)
    x = self.conv2(x)
    x = F.max_pool2d(x, 2)
    x = F.relu(x)
    x = self.conv3(x)
    x = F.max_pool2d(x, 2)
    x = F.relu(x)
    x = x.view(-1, 14112)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    # FIX: F.sigmoid is deprecated (removed in newer PyTorch releases);
    # torch.sigmoid is the supported, numerically identical spelling.
    return torch.sigmoid(x)
|
GHData/BenjaminAm_PyTorch-Tutorial/Tutorial2MNIST.py: 38-51
def forward(self, x):
    """MNIST CNN forward: conv1/pool/ReLU, conv2/dropout/pool/ReLU, flatten
    to 320 features, fc1+ReLU, fc2, log-softmax over classes.

    Returns log-probabilities of shape (batch, num_classes).
    """
    x = self.conv1(x)
    x = F.max_pool2d(x, 2)
    x = F.relu(x)
    x = self.conv2(x)
    x = self.conv_dropout(x)
    x = F.max_pool2d(x, 2)
    x = F.relu(x)
    x = x.view(-1, 320)
    x = F.relu(self.fc1(x))
    x = self.fc2(x)
    # FIX: F.log_softmax without `dim` is deprecated (implicit-dim warning);
    # the class dimension is 1.
    return F.log_softmax(x, dim=1)
|
GHData/adityarp9_Image-Captioning-PyTorch/model.py: 32-43
def forward(self, x_img, x_cap):
    """Caption decoder: embed caption tokens, run the LSTM, project the image
    features, add the final LSTM step to the image vector, and decode."""
    emb = self.embd(x_cap)
    self.lstm.flatten_parameters()
    seq_out, _ = self.lstm(emb)
    img_feat = F.relu(self.fcn1(x_img))
    fused = torch.add(seq_out[-1], img_feat)
    return self.fcn3(F.relu(self.fcn2(fused)))
|
GHData/kirant1008_Image-Captioning-PyTorch/model.py: 34-45
def forward(self, x_img, x_cap):
    """Caption decoder: embedded caption through the LSTM, image features
    through fcn1+ReLU, fused by addition and decoded through fcn2/fcn3."""
    cap_emb = self.embd(x_cap)
    self.lstm.flatten_parameters()
    lstm_out, (h_n, c_n) = self.lstm(cap_emb)
    vis = F.relu(self.fcn1(x_img))
    merged = torch.add(lstm_out[-1], vis)
    hidden = F.relu(self.fcn2(merged))
    return self.fcn3(hidden)
|
GHData/AlanJacob97_Image-Captioning-PyTorch/model.py: 26-37
def forward(self, x_img, x_cap):
    """Fuse LSTM-encoded caption with projected image features and decode.

    Returns the fcn3 output vector.
    """
    embedded = self.embd(x_cap)
    self.lstm.flatten_parameters()
    outputs, (hidden, cell) = self.lstm(embedded)
    projected = F.relu(self.fcn1(x_img))
    combined = torch.add(outputs[-1], projected)
    combined = F.relu(self.fcn2(combined))
    return self.fcn3(combined)
|
GHData/ytZhang99_DAHDRNet-PyTorch/model.py: 45-58
def forward(self, x_i, x_r):
    """Dual attention: channel attention from globally pooled x_i, spatial
    attention from the (x_i, x_r) concat; returns their elementwise product."""
    chan = self.c_act(self.c_conv_2(self.c_conv_1(F.adaptive_avg_pool2d(x_i, (1, 1)))))
    spat = self.s_conv_2(self.s_conv_1(torch.cat((x_i, x_r), dim=1)))
    return chan * spat
|
GHData/grassknoted_PyTorch-Autoencoder/Autoencoder.py: 124-137
def forward(self, x):
    """ResNet-style encoder to a flat feature vector.

    NOTE(review): mu/logvar are sliced from the linear output but never
    returned (the `return mu, logvar` line is commented out) — the function
    currently returns the full vector `x`. Confirm which contract callers
    expect before changing anything.
    """
    x = torch.relu(self.bn1(self.conv1(x)))
    x = self.layer1(x)
    x = self.layer2(x)
    x = self.layer3(x)
    x = self.layer4(x)
    x = F.adaptive_avg_pool2d(x, 1)
    x = x.view(x.size(0), -1)
    x = self.linear(x)
    mu = x[:, :self.z_dim]  # currently unused — see docstring note
    logvar = x[:, self.z_dim:]  # currently unused — see docstring note
    # return mu, logvar
    return x
|
GHData/julianstastny_VAE-ResNet18-PyTorch/model.py: 97-109
def forward(self, x):
    """ResNet-18 VAE encoder: trunk + global average pool, then a single
    linear head whose output is split into (mu, logvar) at z_dim."""
    h = torch.relu(self.bn1(self.conv1(x)))
    for layer in (self.layer1, self.layer2, self.layer3, self.layer4):
        h = layer(h)
    h = F.adaptive_avg_pool2d(h, 1)
    h = self.linear(h.view(h.size(0), -1))
    return h[:, :self.z_dim], h[:, self.z_dim:]
|
GHData/kooyunmo_transformer-torchscript/modeling_distilbert.py: 79-102
def forward(self, input_ids):
    """
    Embed token ids: word embeddings plus learned position embeddings
    (no token-type embeddings), then LayerNorm and dropout.

    Parameters
    ----------
    input_ids: torch.tensor(bs, max_seq_length)
        The token ids to embed.

    Returns
    -------
    embeddings: torch.tensor(bs, max_seq_length, dim)
    """
    seq_length = input_ids.size(1)
    # One position id per token, broadcast across the batch.
    position_ids = torch.arange(seq_length, dtype=torch.long,
                                device=input_ids.device)
    position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
    embeddings = self.word_embeddings(input_ids) + self.position_embeddings(position_ids)
    return self.dropout(self.LayerNorm(embeddings))
|
GHData/mengzhu0308_EfficientNetV2-PyTorch/efficinetnetv2.py: 67-78
def forward(self, input):
    """MBConv-style block: stage1 -> stage2 -> SE -> stage3, with an optional
    residual shortcut gated by stochastic-depth dropout when
    0 < survival_probability < 1."""
    out = self.stage3(self.se(self.stage2(self.stage1(input))))
    if not self.use_shortcut:
        return out
    if 0 < self.survival_probability < 1:
        out = F.dropout(out, p=self.survival_probability,
                        training=self.training, inplace=True)
    return out + input
|
GHData/Lorenzoncina_Sound_Classification_PyTorch/urbansounddataset.py: 24-42
def __getitem__(self, index):
    """Load one audio sample and return (transformed_signal, label).

    Pipeline: load -> move to device -> mono mix-down -> resample ->
    right-pad -> cut -> spectrogram transform.
    """
    audio_sample_path = self._get_audio_sample_path(index)
    label = self._get_audio_sample_label(index)
    #load audio data
    signal, sr = torchaudio.load(audio_sample_path)
    signal = signal.to(self.device) #signal registred on device
    #signal -> (num_channels, samples) -> example with 2 seconds of audio at 16k sr: (2,16000)
    #solve two problems: convert to mono if stereo and resample to 16000 hz
    signal = self._mix_down_if_necessary(signal)
    signal = self._resample_if_necessary(signal, sr)
    #if the signal has less samples than those expetected -> zero padding
    signal = self._right_pad_if_necessary(signal)
    #if the signal has more samples of than expected -> cut it
    signal = self._cut_if_necessary(signal)
    # NOTE(review): padding happens BEFORE cutting here, while the sibling
    # GTZAN dataset cuts first and then pads — confirm the intended order.
    #extra step: trasform the waveform to a melspectrogram
    signal = self.transformation(signal) #transform is a callable object, so we can pass to it directly the audio
    return signal, label
|
GHData/langmanbusi_KinD_PyTorch/model_KinD_color.py: 135-151
def forward(self, input_im):
    """KinD decomposition net: small encoder/decoder with upsample+concat
    skips; returns (out_R reflectance, out_I illumination)."""
    c1 = self.decom_conv1(input_im)
    c2 = self.decom_conv2(c1)
    c3 = self.decom_conv3(c2)
    c4 = self.decom_conv4(torch.cat((self.decom_upsample1(c3), c2), dim=1))
    c5 = self.decom_conv5(torch.cat((self.decom_upsample2(c4), c1), dim=1))
    out_R = self.decom_out_R(c5)
    out_I = self.decom_out_I(torch.cat((self.decom_conv6(c1), c5), dim=1))
    return out_R, out_I
# KinD RelightNet
|
GHData/EnrcDamn_MusicGenreClassifier-PyTorch/gtzan_dataset.py: 28-43
def __getitem__(self, index):
    """Return (transformed_signal, label) for the audio file at `index`.

    Pipeline: load -> move to device -> mono mix-down -> resample ->
    cut -> right-pad -> transformation (e.g. spectrogram).
    """
    # mylist[0] -> mylist.__getitem__(0)
    # get signal and fold index for an audio file
    audio_sample_path = self._get_audio_sample_path(index)
    label = self._get_audio_sample_label(index)
    signal, sr = torchaudio.load(audio_sample_path)
    signal = signal.to(self.device)
    # apply processing and transformation to the signal
    signal = self._mix_down_if_necessary(signal)
    signal = self._resample_if_necessary(signal, sr)
    signal = self._cut_if_necessary(signal)
    signal = self._right_pad_if_necessary(signal)
    # signal = self._spectrogram(signal)
    signal = self.transformation(signal)
    return signal, label
|
GHData/Syavaprd_mobilenet_v3/run.py: 90-104
def forward(self, x):
    """Inverted-residual block: pointwise conv -> depthwise conv ->
    optional squeeze-excite -> pointwise conv, with an optional skip add."""
    out = self.dconv1(self.conv1(x))
    if self.SE:
        out = self.squeeze(out)
    out = self.conv2(out)
    return x + out if self.connect_flag else out
|
GHData/SpikeKing_MobileNetV3-Classification-PyTorch/model.py: 113-131
def forward(self, x):
    """MobileNetV3 bottleneck: expand conv, depthwise conv, optional
    squeeze-and-excite, pointwise projection; residual add when enabled."""
    # expand + depthwise
    feat = self.depth_conv(self.conv(x))
    # squeeze and excite
    if self.SE:
        feat = self.squeeze_block(feat)
    # point-wise projection
    feat = self.point_conv(feat)
    # residual connection
    return x + feat if self.use_connect else feat
|
GHData/Lornatang_RFB_ESRGAN-PyTorch/model.py: 139-155
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """RFB block: four parallel branches concatenated and fused by a linear
    conv, added to a 0.2-scaled shortcut of the input."""
    sc = torch.mul(self.shortcut(x), 0.2)
    branches = [branch(x) for branch in
                (self.branch1, self.branch2, self.branch3, self.branch4)]
    fused = self.conv_linear(torch.cat(branches, 1))
    return torch.add(fused, sc)
# Source code reference from `https://arxiv.org/pdf/2005.12597.pdf`.
|
GHData/neuece_PyTorch_Model_Zoo/Text_BiDirectLSTM.py: 66-79
def forward(self, x):
    """BiLSTM text classifier: embed + dropout, BiLSTM, transpose to
    (batch, features, seq), tanh, max-over-time pool, tanh, linear head.

    Returns raw class logits.
    """
    x = self.embed(x)
    x = self.dropout_embed(x)
    bilstm_out, _ = self.bilstm(x)
    bilstm_out = torch.transpose(bilstm_out, 0, 1)
    bilstm_out = torch.transpose(bilstm_out, 1, 2)
    # FIX: F.tanh is deprecated (removed in newer PyTorch); torch.tanh is the
    # supported, numerically identical spelling (both occurrences).
    bilstm_out = torch.tanh(bilstm_out)
    bilstm_out = F.max_pool1d(bilstm_out, bilstm_out.size(2)).squeeze(2)
    bilstm_out = torch.tanh(bilstm_out)
    logit = self.fc(bilstm_out)
    return logit
|
GHData/RizhaoCai_PyTorch_ONNX_TensorRT/trt_int8_demo.py: 43-56
def forward_onnx(self, X_in):
    """ONNX-export-friendly forward: fixed batch size and FC width so every
    shape in the exported graph is deterministic for TensorRT."""
    print("Function forward_onnx called! \n")
    feat = self.max_pool(self.relu(self.layer1(X_in)))
    feat = self.avg_pool(self.relu(self.layer2(feat)))
    assert self.batch_size_onnx > 0
    length_of_fc_layer = 64  # For exporting an onnx model that fit the TensorRT, processes here should be DETERMINISITC!
    return self.fc(feat.view(self.batch_size_onnx, length_of_fc_layer))
|
GHData/devonsuper_PyTorch_ONNX_TensorRT/monodepth2conversion.py: 44-57
def forward_onnx(self, X_in):
    """Deterministic forward used when exporting to ONNX/TensorRT: the batch
    size and flattened FC width are fixed constants."""
    print("Function forward_onnx called! \n")
    out = self.layer1(X_in)
    out = self.max_pool(self.relu(out))
    out = self.relu(self.layer2(out))
    out = self.avg_pool(out)
    assert self.batch_size_onnx > 0
    fc_width = 64  # deterministic reshape required by the TensorRT export
    out = out.view(self.batch_size_onnx, fc_width)
    return self.fc(out)
|
GHData/devonsuper_PyTorch_ONNX_TensorRT/trt_int8_demo.py: 43-56
def forward_onnx(self, X_in):
    """Forward variant for ONNX export; all shapes are kept deterministic
    (fixed batch size, fixed 64-wide flatten) so TensorRT can consume it."""
    print("Function forward_onnx called! \n")
    h = self.layer1(X_in)
    h = self.relu(h)
    h = self.max_pool(h)
    h = self.layer2(h)
    h = self.relu(h)
    h = self.avg_pool(h)
    assert self.batch_size_onnx > 0
    flat_width = 64  # fixed FC input width — export must be deterministic
    flattened = h.view(self.batch_size_onnx, flat_width)
    return self.fc(flattened)
|
GHData/CeasonCui_CNN_PyTorch/_4_cnn_fm.py: 58-73
def forward(self, x):
    """Single-conv CNN forward.

    Returns (x1, x): the post-ReLU activation reshaped for visualization and
    the flattened pooled features.

    NOTE(review): `output = self.fc1(x)` is computed but never returned —
    the fc logits are discarded. Confirm whether callers expect them
    (the sibling `_3_cnn_okado` variant returns `output, x1`).
    """
    x = x.float()
    x = x.view(-1, 1, 64, 64)
    #x = x.reshape(-1, 1, 64, 64)
    x = self.conv1(x)
    x = self.relu(x)
    x1 = x.reshape(-1, 1, 64, 64)  # activation snapshot for visualization
    x = self.pool(x)
    #x = self.conv2(x)
    #x = self.conv3(x)
    #x = self.conv4(x)
    x = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
    output = self.fc1(x)  # unused — see docstring note
    #output = self.softmax(x)
    return x1,x # return x for visualization
|
GHData/CeasonCui_CNN_PyTorch/_3_cnn_okado.py: 131-148
def forward(self, x):
    """Single-conv CNN: reshape to (N, 1, 64, 64), conv+ReLU, pool, flatten,
    fc1 head. Returns (logits, pre-pool activation map for visualization)."""
    x = x.float().view(-1, 1, 64, 64)
    x = self.relu(self.conv1(x))
    vis = x.reshape(-1, 1, 64, 64)  # activation snapshot for visualization
    x = self.pool(x)
    flat = x.view(x.size(0), -1)
    logits = self.fc1(flat)
    return logits, vis
|
GHData/langmanbusi_KinD_PyTorch/model_KinD_color.py: 249-264
def forward(self, input_R):
    """KinD restoration net: two residual-block stacks around conv2/conv3,
    then a long skip concat of the raw input before the final conv."""
    feat = self.conv1(input_R)
    for block in self.resblocks1:
        feat = block(feat)
    feat = self.conv3(self.conv2(feat))
    for block in self.resblocks2:
        feat = block(feat)
    feat = torch.cat([feat, input_R], dim=1)
    return self.conv4(feat)
|
GHData/shimon-c_torch/LResNet.py: 94-105
def forward(self, X):
    """Run the stacked sub-models, flatten, pass through an optional hidden
    linear layer (with ReLU), then the output layer and a softmax."""
    for sub in self.models:
        X = sub(X)
    X = X.view(X.shape[0], -1)
    if self.lin1:
        X = F.relu(self.lin1(X))
    X = self.lin2(X)
    return F.softmax(X, dim=1)
|
GHData/jweig0ld_VAE/vae_skeleton.py: 398-411
def forward(self, x):
    """VAE forward: encode -> (mu, logvar) -> reparameterize -> transition ->
    decode. Returns (y, probs, mu, logvar) where probs = sigmoid(y).

    NOTE(review): the shape guard compares y.shape[1:] against
    (channels, x_dim, z_dim), but the corrective slice uses FIVE indices in a
    different order (:channels, :z_dim, :x_dim, :x_dim) — this looks
    inconsistent; verify against the decoder's actual output rank.
    """
    encoded = self.encoder(x)
    mu = self.mu_layer(encoded)
    logvar = self.logvar_layer(encoded)
    z = self.reparameterize(mu, logvar)
    z = self.transition_layer(z)
    y = self.decoder(z)
    if y.shape[1:] != (self.channels, self.x_dim, self.z_dim):
        y = y[:, :self.channels, :self.z_dim, :self.x_dim, :self.x_dim]
    probs = torch.sigmoid(y)
    return y, probs, mu, logvar
|
GHData/Lornatang_IDN-PyTorch/model.py: 93-110
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
bicubic = F.interpolate(x, scale_factor=self.upscale_factor, mode="bilinear")
out = self.conv1(x)
out = self.conv2(out)
out = self.idl1(out)
out = self.idl2(out)
out = self.idl3(out)
out = self.idl4(out)
out = self.upsample(out, output_size=bicubic.size())
out = torch.add(out, bicubic)
out = torch.clamp_(out, 0.0, 1.0)
return out
|
GHData/xiaokai11_ESRGAN-PyTorch/model.py: 189-204
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
out1 = self.conv1(x)
out = self.trunk(out1)
out2 = self.conv2(out)
out = torch.add(out1, out2)
out = self.upsampling1(F.interpolate(out, scale_factor=2, mode="nearest"))
out = self.upsampling2(F.interpolate(out, scale_factor=2, mode="nearest"))
out = self.conv3(out)
out = self.conv4(out)
out = torch.clamp_(out, 0.0, 1.0)
return out
|
GHData/sdecoder_ESRGAN-PyTorch/model.py: 189-204
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
out1 = self.conv1(x)
out = self.trunk(out1)
out2 = self.conv2(out)
out = torch.add(out1, out2)
out = self.upsampling1(F.interpolate(out, scale_factor=2, mode="nearest"))
out = self.upsampling2(F.interpolate(out, scale_factor=2, mode="nearest"))
out = self.conv3(out)
out = self.conv4(out)
out = torch.clamp_(out, 0.0, 1.0)
return out
|
GHData/rohitvk1_Facial-Keypoint-Detection-with-PyTorch/model.py: 39-66
def forward(self, x):
    """Facial-keypoint CNN: four conv/pool(/BN)/dropout stages, flatten,
    then three FC layers (BN+dropout after the first two).

    Returns the raw keypoint regression output of fc3.
    """
    x = self.drop1(self.pool(F.relu(self.conv1(x))))
    x = self.drop2(self.bn1(self.pool(F.relu(self.conv2(x)))))
    x = self.drop3(self.bn2(self.pool(F.relu(self.conv3(x)))))
    x = self.drop4(self.bn3(self.pool(F.relu(self.conv4(x)))))
    # flatten
    x = x.view(x.size(0), -1)
    # fully connected layers
    x = self.drop5(self.bn4(F.relu(self.fc1(x))))
    x = self.drop6(self.bn5(F.relu(self.fc2(x))))
    return self.fc3(x)
|
GHData/mf1024_Contrastive-Predictive-Coding-for-Image-Recognition-in-PyTorch/resnet_blocks.py: 44-64
def forward(self, x):
    """Basic ResNet block: conv/BN/ReLU, conv/BN, residual add, final ReLU.
    Downsampling blocks project the identity with a 1x1 conv + BN first."""
    shortcut = x
    if self.is_downsampling_block:
        shortcut = self.projection_batch_norm(self.projection_shortcut(shortcut))
    out = nn.functional.relu(self.batch_norm_1(self.conv_layer_1.forward(x)))
    out = self.batch_norm_2(self.conv_layer_2.forward(out))
    return nn.functional.relu(out + shortcut)
|
GHData/steveowk_Unet-in-PyTorch/unetModel.py: 56-91
def forward (self,image):
    """U-Net: four conv/pool encoder levels plus a bottleneck (conv5), then
    four transpose-conv decoder levels; each decoder level concatenates a
    center-cropped encoder feature map (crop_tensor) before its double conv.
    Returns the final 1x1 `out` projection."""
    #encoder
    in1 = self.conv1(image)
    in1_pool = self.max_pool(in1)
    in2 = self.conv2(in1_pool)
    in2_pool = self.max_pool(in2)
    in3 = self.conv3(in2_pool)
    in3_pool = self.max_pool(in3)
    in4 = self.conv4(in3_pool)
    in4_pool = self.max_pool(in4)
    in5 = self.conv5(in4_pool)
    # decoder
    transposed_conv1 = self.transposed_conv1(in5)
    cropped_tensor1 = crop_tensor(in4,transposed_conv1)
    concat1 = torch.cat([cropped_tensor1,transposed_conv1],1)
    conv_concat1 = self.dconv1(concat1)
    transposed_conv2 = self.transposed_conv2(conv_concat1)
    cropped_tensor2 = crop_tensor(in3,transposed_conv2)
    concat2 = torch.cat([cropped_tensor2,transposed_conv2],1)
    conv_concat2 = self.dconv2(concat2)
    transposed_conv3 = self.transposed_conv3(conv_concat2)
    cropped_tensor3 = crop_tensor(in2,transposed_conv3)
    concat3 = torch.cat([cropped_tensor3,transposed_conv3],1)
    conv_concat3 = self.dconv3(concat3)
    transposed_conv4 = self.transposed_conv4(conv_concat3)
    cropped_tensor4 = crop_tensor(in1,transposed_conv4)
    concat4 = torch.cat([cropped_tensor4,transposed_conv4],1)
    conv_concat4 = self.dconv4(concat4)
    return self.out(conv_concat4)
|
GHData/aqbewtra_Multi-Class-Aerial-Segmentation/model.py: 83-122
def forward(self, x):
    """U-Net forward: 4-level encoder plus bottleneck, decoder with
    pad_to_match applied before every skip concatenation; the output
    segmentation map is resized to 300x300."""
    #PAD ENCODED BLOCKS BEFORE CONCATTENATING TENSORS
    encode1 = self.down1(x)
    encode2 = self.maxPool1(encode1)
    encode2 = self.down2(encode2)
    encode3 = self.maxPool2(encode2)
    encode3 = self.down3(encode3)
    encode4 = self.maxPool3(encode3)
    encode4 = self.down4(encode4)
    bottleneck = self.maxPool4(encode4)
    bottleneck = self.bottleneck(bottleneck)
    decode = self.upConv4(bottleneck)
    #PAD
    decode = pad_to_match(decode, encode4)
    decode = torch.cat((decode, encode4), dim=1)
    decode = self.up4(decode)
    decode = self.upConv3(decode)
    decode = pad_to_match(decode, encode3)
    decode = torch.cat((decode, encode3), dim=1)
    decode = self.up3(decode)
    decode = self.upConv2(decode)
    decode = pad_to_match(decode, encode2)
    decode = torch.cat((decode, encode2), dim=1)
    decode = self.up2(decode)
    decode = self.upConv1(decode)
    decode = pad_to_match(decode, encode1)
    decode = torch.cat((decode, encode1), dim=1)
    decode = self.up1(decode)
    seg_map = self.outMap(decode)
    # NOTE(review): transforms.Resize is constructed on every call — consider
    # hoisting it into __init__ if this forward is on a hot path.
    return transforms.Resize((300,300))(seg_map)
|
GHData/devbruce_Segmentation-UNet-PyTorch/model.py: 65-109
def forward(self, x):
    """Standard U-Net: four encoder levels (two convs + pool each) and a
    bottleneck (enc5_1/dec5_1), then a mirrored decoder whose every level
    concatenates the matching encoder feature map along the channel axis.
    Returns the 1x1-conv segmentation logits."""
    # Encoder
    enc1_1 = self.enc1_1(x)
    enc1_2 = self.enc1_2(enc1_1)
    pool1 = self.pool1(enc1_2)
    enc2_1 = self.enc2_1(pool1)
    enc2_2 = self.enc2_2(enc2_1)
    pool2 = self.pool2(enc2_2)
    enc3_1 = self.enc3_1(pool2)
    enc3_2 = self.enc3_2(enc3_1)
    pool3 = self.pool3(enc3_2)
    enc4_1 = self.enc4_1(pool3)
    enc4_2 = self.enc4_2(enc4_1)
    pool4 = self.pool4(enc4_2)
    enc5_1 = self.enc5_1(pool4)
    # Decoder
    dec5_1 = self.dec5_1(enc5_1)
    upconv4 = self.upconv4(dec5_1)
    cat4 = torch.cat((upconv4, enc4_2), dim=1) # dim={0: batch, 1: channel, 2: height, 3: width}
    dec4_2 = self.dec4_2(cat4)
    dec4_1 = self.dec4_1(dec4_2)
    upconv3 = self.upconv3(dec4_1)
    cat3 = torch.cat((upconv3, enc3_2), dim=1)
    dec3_2 = self.dec3_2(cat3)
    dec3_1 = self.dec3_1(dec3_2)
    upconv2 = self.upconv2(dec3_1)
    cat2 = torch.cat((upconv2, enc2_2), dim=1)
    dec2_2 = self.dec2_2(cat2)
    dec2_1 = self.dec2_1(dec2_2)
    upconv1 = self.upconv1(dec2_1)
    cat1 = torch.cat((upconv1, enc1_2), dim=1)
    dec1_2 = self.dec1_2(cat1)
    dec1_1 = self.dec1_1(dec1_2)
    x = self.conv1x1(dec1_1)
    return x
|
GHData/tabularasa066a_DL_Othello_PyTorch/network.py: 39-80
def forward(self, x):
    """Othello policy network forward pass.

    Layer 1: conv_type1 + BN + ReLU. Layers 2-9: eight identical
    conv_type2 + BN + ReLU stages (the original repeated the same three
    lines eight times verbatim; the shared modules are now applied in a
    loop, resolving the original TODO). Layer 10: conv_type3, reshaped to
    (batch, 64) logits.

    Returns raw logits: softmax must NOT be applied here, because PyTorch's
    cross-entropy loss applies it internally (per the original author's
    note). The dead `y = F.softmax(x, dim=1)` — computed and discarded —
    has been removed.
    """
    # Layer 1
    x = self.relu(self.bn(self.conv_type1(x)))
    # Layers 2-9 (eight identical stages sharing conv_type2/bn/relu)
    for _ in range(8):
        x = self.relu(self.bn(self.conv_type2(x)))
    # Layer 10
    x = self.conv_type3(x)
    batch = x.data.shape[0]  # e.g. 100 (equal to the minibatch size)
    return torch.reshape(x, (batch, 64))
|
GHData/pshlego_HDNet_torch_ami/hourglass_net_depth_torch.py: 56-93
def forward(self, x):
    """Hourglass network: NHWC input is permuted to NCHW, run through a
    conv/pool encoder (c0-c8 with pools p0-p3), then upsampled four times
    with skip concatenations against out_8/out_6/out_4/out_1, and the final
    c17 output is permuted back to NHWC."""
    x = x.permute(0,3,1,2)
    out_0=self.c0(x)
    out_1=self.c1(out_0)
    out_2=self.c2(out_1)
    out_3=self.p0(out_2)
    out_4=self.c3(out_3)
    out_5=self.p1(out_4)
    out_6=self.c4(out_5)
    out_7=self.p2(out_6)
    out_8=self.c5(out_7)
    out_9=self.p3(out_8)
    out_10=self.c6(out_9)
    out_11=self.c7(out_10)
    out_12=self.c8(out_11)
    out_13=self.up1(out_12)
    out_14=torch.cat([out_13,out_8],dim=1)
    out_15=self.c9(out_14)
    out_16=self.c10(out_15)
    out_17=self.up2(out_16)
    out_18=torch.cat([out_17,out_6],dim=1)
    out_19=self.c11(out_18)
    out_20=self.c12(out_19)
    out_21=self.up3(out_20)
    out_22=torch.cat([out_21,out_4],dim=1)
    out_23=self.c13(out_22)
    out_24=self.c14(out_23)
    out_25=self.up4(out_24)
    out_26=torch.cat([out_25,out_1],dim=1)
    out_27=self.c15(out_26)
    out_28=self.c16(out_27)
    stack_out_d=self.c17(out_28)
    stack_out_d = stack_out_d.permute(0,2,3,1)
    return stack_out_d
|
GHData/salmedina_soundnet_pytorch/soundnet.py: 75-124
def forward(self, waveform):
    """
    Run SoundNet over a raw waveform and collect intermediate outputs.

    Args:
        waveform (Tensor): Raw 20s waveform.

    Returns:
        dict of numpy arrays: 'conv7' features plus the two conv8 heads,
        'y_obj' (objects) and 'y_scns' (scenes).
    """
    if torch.cuda.is_available():
        # BUG FIX: Tensor.cuda() is NOT in-place; the original discarded the
        # result, so the input silently stayed on CPU. (The model itself must
        # also be on the GPU for this path to work.)
        waveform = waveform.cuda()
    output = dict()
    x = self.maxpool1(self.relu1(self.batchnorm1(self.conv1(waveform))))
    x = self.maxpool2(self.relu2(self.batchnorm2(self.conv2(x))))
    x = self.relu3(self.batchnorm3(self.conv3(x)))
    x = self.relu4(self.batchnorm4(self.conv4(x)))
    x = self.maxpool5(self.relu5(self.batchnorm5(self.conv5(x))))
    x = self.relu6(self.batchnorm6(self.conv6(x)))
    x = self.conv7(x)
    # .detach().cpu() so the numpy conversion also works when the tensor
    # actually lives on the GPU (replaces the deprecated `.data.numpy()`).
    output['conv7'] = x.detach().cpu().numpy()
    x = self.relu7(self.batchnorm7(x))
    x_obj = self.conv8_objs(x)
    output['y_obj'] = x_obj.detach().cpu().numpy()
    x_scns = self.conv8_scns(x)
    output['y_scns'] = x_scns.detach().cpu().numpy()
    return output
|
GHData/chuanli11_WCT-PyTorch/ae.py: 343-376
def forward(self,x):
    """VGG-style encoder with reflection padding before each conv.

    NOTE(review): the pooling indices returned by the three max-pool stages
    (pool_idx, pool_idx2, pool_idx3) and the pre-pool activations
    (pool1/pool2/pool3) are computed but never used or returned here —
    presumably kept for a matching unpooling decoder; confirm.
    """
    out = self.conv1(x)
    out = self.reflecPad1(out)
    out = self.conv2(out)
    out = self.relu2(out)
    out = self.reflecPad3(out)
    out = self.conv3(out)
    pool1 = self.relu3(out)
    out,pool_idx = self.maxPool(pool1)
    out = self.reflecPad4(out)
    out = self.conv4(out)
    out = self.relu4(out)
    out = self.reflecPad5(out)
    out = self.conv5(out)
    pool2 = self.relu5(out)
    out,pool_idx2 = self.maxPool2(pool2)
    out = self.reflecPad6(out)
    out = self.conv6(out)
    out = self.relu6(out)
    out = self.reflecPad7(out)
    out = self.conv7(out)
    out = self.relu7(out)
    out = self.reflecPad8(out)
    out = self.conv8(out)
    out = self.relu8(out)
    out = self.reflecPad9(out)
    out = self.conv9(out)
    pool3 = self.relu9(out)
    out,pool_idx3 = self.maxPool3(pool3)
    out = self.reflecPad10(out)
    out = self.conv10(out)
    out = self.relu10(out)
    return out
|
GHData/pshlego_HDNet_torch_ami/hourglass_net_normal_torch.py: 56-93
def forward(self, x):
    """Hourglass network (normal-map variant): NHWC input permuted to NCHW,
    conv/pool encoder (c0-c8, p0-p3), four upsampling stages with skip
    concatenations against out_8/out_6/out_4/out_1, output permuted back
    to NHWC."""
    x = x.permute(0,3,1,2)
    out_0=self.c0(x)
    out_1=self.c1(out_0)
    out_2=self.c2(out_1)
    out_3=self.p0(out_2)
    out_4=self.c3(out_3)
    out_5=self.p1(out_4)
    out_6=self.c4(out_5)
    out_7=self.p2(out_6)
    out_8=self.c5(out_7)
    out_9=self.p3(out_8)
    out_10=self.c6(out_9)
    out_11=self.c7(out_10)
    out_12=self.c8(out_11)
    out_13=self.up1(out_12)
    out_14=torch.cat([out_13,out_8],dim=1)
    out_15=self.c9(out_14)
    out_16=self.c10(out_15)
    out_17=self.up2(out_16)
    out_18=torch.cat([out_17,out_6],dim=1)
    out_19=self.c11(out_18)
    out_20=self.c12(out_19)
    out_21=self.up3(out_20)
    out_22=torch.cat([out_21,out_4],dim=1)
    out_23=self.c13(out_22)
    out_24=self.c14(out_23)
    out_25=self.up4(out_24)
    out_26=torch.cat([out_25,out_1],dim=1)
    out_27=self.c15(out_26)
    out_28=self.c16(out_27)
    stack_out_d=self.c17(out_28)
    stack_out_d = stack_out_d.permute(0,2,3,1)
    return stack_out_d
|
GHData/kose_PyTorch_MNIST_Optuna/mnist_cnn.py: 61-77
def forward(self, x):
    """Two-conv MNIST CNN forward.

    conv1+ReLU, conv2+ReLU, 2x2 max-pool, dropout, flatten, fc1+ReLU,
    dropout, fc2; returns log-probabilities over the classes (dim 1).
    """
    # Removed dead code: `batch_size = x.shape[0]` was never used.
    h = F.relu(self.conv1(x))
    h = F.relu(self.conv2(h))
    h = F.max_pool2d(h, 2)
    h = self.dropout1(h)
    h = torch.flatten(h, 1)
    h = F.relu(self.fc1(h))
    h = self.dropout2(h)
    h = self.fc2(h)
    return F.log_softmax(h, dim=1)
|
GHData/as595_PyTorchBCNN/models.py: 111-123
def forward(self, x):
    """Two conv/ReLU/pool stages, flatten, then fc1/fc2 with ReLU (dropout
    after fc2) and the fc3 output head."""
    x = F.max_pool2d(F.relu(self.conv1(x)), 2)
    x = F.max_pool2d(F.relu(self.conv2(x)), 2)
    x = x.view(x.size()[0], -1)
    x = F.relu(self.fc1(x))
    x = self.drop(F.relu(self.fc2(x)))
    return self.fc3(x)
|
GHData/RajanNV_AI_Models_PyTorch/UNet2D.py: 86-139
def forward(self, x):
    """2D U-Net with dropout after every pool; instead of returning the
    decoder output `u`, an extra conv/pool classification tail (Con1-Con3)
    is applied and its result returned (the `return u` path and an
    alternative Conv21-Conv23 tail are kept commented out below)."""
    d1 = self.Conv1(x)
    d2 = self.Maxpool(d1)
    d3 = self.drop(d2)
    d4 = self.Conv2(d3)
    d5 = self.Maxpool(d4)
    d6= self.drop(d5)
    d7 = self.Conv3(d6)
    d8 = self.Maxpool(d7)
    d9 = self.drop(d8)
    d10 = self.Conv4(d9)
    d11 = self.Maxpool(d10)
    d12 = self.drop(d11)
    d13 = self.Conv5(d12)
    u15 = self.Up5(d13)
    u14 = torch.cat((d10, u15), dim=1)
    u13 = self.Up_conv5(u14)
    u12= self.Up4(u13)
    u11 = torch.cat((d7, u12), dim=1)
    u10 = self.drop(u11)
    u9 = self.Up_conv4(u10)
    u8 = self.Up3(u9)
    u7 = torch.cat((d4, u8), dim=1)
    u6 = self.drop(u7)
    u5 = self.Up_conv3(u6)
    u4 = self.Up2(u5)
    u3 = torch.cat((d1, u4), dim=1)
    u2 = self.drop(u3)
    u1 = self.Up_conv2(u2)
    u = self.Conv(u1)
    # return u
    #
    # ff1=self.Conv21(u)
    # ff1=self.Conv22(ff1)
    # ff1=self.Conv23(ff1)
    # return ff1
    ff1=self.Con1(u)
    ff1= self.Maxpool(ff1)
    ff1 = self.drop(ff1)
    ff1=self.Con2(ff1)
    ff1= self.Maxpool(ff1)
    ff1 = self.drop(ff1)
    ff1=self.Con3(ff1)
    return ff1
|
GHData/HopefulRational_DeepCaps-PyTorch/deepcaps.py: 1008-1050
def forward(self, x, target=None):
    """DeepCaps forward: conv stem + BN, conversion to capsules, four
    capsule-conv stages each with a skip add, then the last two capsule
    levels are flattened, concatenated, and routed through the digit caps.

    Returns (dig_caps, masked, decoded, indices); `target` (if given) is
    used only for masking during training.
    """
    x = self.conv2d(x)
    x = self.batchNorm(x)
    x = self.toCaps(x)
    x = self.conv2dCaps1_nj_4_strd_2(x)
    x_skip = self.conv2dCaps1_nj_4_strd_1_1(x)
    x = self.conv2dCaps1_nj_4_strd_1_2(x)
    x = self.conv2dCaps1_nj_4_strd_1_3(x)
    x = x + x_skip
    x = self.conv2dCaps2_nj_8_strd_2(x)
    x_skip = self.conv2dCaps2_nj_8_strd_1_1(x)
    x = self.conv2dCaps2_nj_8_strd_1_2(x)
    x = self.conv2dCaps2_nj_8_strd_1_3(x)
    x = x + x_skip
    x = self.conv2dCaps3_nj_8_strd_2(x)
    x_skip = self.conv2dCaps3_nj_8_strd_1_1(x)
    x = self.conv2dCaps3_nj_8_strd_1_2(x)
    x = self.conv2dCaps3_nj_8_strd_1_3(x)
    x = x + x_skip
    x1 = x
    x = self.conv2dCaps4_nj_8_strd_2(x)
    x_skip = self.conv3dCaps4_nj_8(x)
    x = self.conv2dCaps4_nj_8_strd_1_1(x)
    x = self.conv2dCaps4_nj_8_strd_1_2(x)
    x = x + x_skip
    x2 = x
    # x1.shape : torch.Size([64, 32, 8, 4, 4]) | x2.shape : torch.Size([64, 32, 8, 2, 2]) (for CIFAR10)
    xa = self.flatCaps(x1)
    xb = self.flatCaps(x2)
    x = torch.cat((xa, xb), dim=-2)
    dig_caps = self.digCaps(x)
    x = self.capsToScalars(dig_caps)  # NOTE(review): result unused below
    masked, indices = self.mask(dig_caps, target)
    decoded = self.decoder(masked)
    return dig_caps, masked, decoded, indices
|
GHData/SWKoreaBME_D-Unet_PyTorch/DUnet.py: 74-129
def forward(self, x):
    """D-UNet forward: a parallel 3D stream (run on an expanded, permuted
    copy of the input) is fused into the 2D encoder at levels 2 and 3 via
    squeeze-excite additions; the decoder uses standard concat skips."""
    input3d = self.Expand()(x) # 1, batch_size, 4, 192, 192
    input3d = input3d.permute(1, 0, 2, 3, 4) # batch_size, 1, 4, 192, 192
    # 3d Stream
    conv3d1 = self.bn_3d_1(input3d)
    pool3d1 = self.MaxPool3d(conv3d1)
    conv3d2 = self.bn_3d_2(pool3d1)
    pool3d2 = self.MaxPool3d(conv3d2)
    conv3d3 = self.bn_3d_3(pool3d2)
    # 2d Encoding
    in_channels = self.in_channels  # NOTE(review): unused local
    conv1 = self.bn_2d_1(x)
    pool1 = self.MaxPool2d(conv1)
    conv2 = self.bn_2d_2(pool1)
    conv2 = self.se_add_2(conv3d2, conv2)
    pool2 = self.MaxPool2d(conv2)
    conv3 = self.bn_2d_3(pool2)
    conv3 = self.se_add_3(conv3d3, conv3)
    pool3 = self.MaxPool2d(conv3)
    conv4 = self.bn_2d_4(pool3)
    conv4 = self.Dropout(conv4)
    pool4 = self.MaxPool2d(conv4)
    conv5 = self.bn_2d_5(pool4)
    conv5 = self.Dropout(conv5)
    # Decoding
    up6 = self.up_block_1(conv5)
    merge6 = torch.cat(([conv4, up6]), 1)
    conv6 = self.bn_2d_6(merge6)
    up7 = self.up_block_2(conv6)
    merge7 = torch.cat(([conv3, up7]), 1)
    conv7 = self.bn_2d_7(merge7)
    up8 = self.up_block_3(conv7)
    merge8 = torch.cat(([conv2, up8]), 1)
    conv8 = self.bn_2d_8(merge8)
    up9 = self.up_block_4(conv8)
    merge9 = torch.cat(([conv1, up9]), 1)
    conv9 = self.bn_2d_9(merge9)
    conv10 = self.conv_10(conv9)
    return conv10
|
GHData/HopefulRational_DeepCaps-PyTorch/deepcaps.py: 913-954
def forward(self, x, target=None):
    """DeepCaps forward (second variant): conv stem + BN, capsule conversion,
    four residual capsule stages, flatten+concat of the last two capsule
    levels, digit-caps routing, masking, and decoding.

    Returns (dig_caps, masked, decoded, indices).
    """
    x = self.conv2d(x)
    x = self.batchNorm(x)
    x = self.toCaps(x)
    x = self.conv2dCaps1_nj_4_strd_2(x)
    x_skip = self.conv2dCaps1_nj_4_strd_1_1(x)
    x = self.conv2dCaps1_nj_4_strd_1_2(x)
    x = self.conv2dCaps1_nj_4_strd_1_3(x)
    x = x + x_skip
    x = self.conv2dCaps2_nj_8_strd_2(x)
    x_skip = self.conv2dCaps2_nj_8_strd_1_1(x)
    x = self.conv2dCaps2_nj_8_strd_1_2(x)
    x = self.conv2dCaps2_nj_8_strd_1_3(x)
    x = x + x_skip
    x = self.conv2dCaps3_nj_8_strd_2(x)
    x_skip = self.conv2dCaps3_nj_8_strd_1_1(x)
    x = self.conv2dCaps3_nj_8_strd_1_2(x)
    x = self.conv2dCaps3_nj_8_strd_1_3(x)
    x = x + x_skip
    x1 = x
    x = self.conv2dCaps4_nj_8_strd_2(x)
    x_skip = self.conv3dCaps4_nj_8(x)
    x = self.conv2dCaps4_nj_8_strd_1_1(x)
    x = self.conv2dCaps4_nj_8_strd_1_2(x)
    x = x + x_skip
    x2 = x
    xa = self.flatCaps(x1)
    xb = self.flatCaps(x2)
    x = torch.cat((xa, xb), dim=-2)
    dig_caps = self.digCaps(x)
    x = self.capsToScalars(dig_caps)  # NOTE(review): result unused below
    masked, indices = self.mask(dig_caps, target)
    decoded = self.decoder(masked)
    return dig_caps, masked, decoded, indices
|
GHData/long123524_BsiNet-torch/models.py: 350-392
def forward(self, x):
    """BsiNet multi-task forward: shared encoder/decoder trunk with SGE
    attention, then three heads (mask, contour, distance).

    NOTE(review): when self.add_output is False the function falls through
    and implicitly returns None — confirm callers handle that. The nesting
    of the output tail was reconstructed from flattened source; verify
    against upstream BsiNet.
    """
    x1 = self.conv1(x)
    x2 = self.conv2(x1)
    x2 = self.pool1(x2)
    x3 = self.conv3(x2)
    x3 = self.pool1(x3)
    x4 = self.conv4(x3)
    x4 = self.pool1(x4)
    x5 = self.conv5(x4)
    x5 = self.pool2(x5)
    x_6 = self.upsample2(x5)
    x6 = self.conv6(torch.cat([x_6, x4], 1))
    x6 = self.upsample1(x6)
    x7 = self.conv7(torch.cat([x6, x3], 1))
    x7 = self.upsample1(x7)
    x8 = self.conv8(torch.cat([x7, x2], 1))
    x8 = self.upsample1(x8)
    x9 = self.conv9(torch.cat([x8, x1], 1))
    x_out = self.sge(x9)
    if self.add_output:
        x_out1 = self.conv_final1(x_out)
        x_out2 = self.conv_final2(x_out)
        x_out3 = self.conv_final3(x_out)
        if self.num_classes > 1:
            x_out1 = F.log_softmax(x_out1, dim=1)
            x_out2 = F.log_softmax(x_out2, dim=1)
        x_out3 = torch.sigmoid(x_out3)
        return [x_out1, x_out2, x_out3]
|
GHData/sharathmaidargi_finetune_torchvision/inception.py: 76-135
def forward(self, x):
    """Inception v3 feature extractor: the final fc layer is commented out,
    so this returns pooled 2048-d features — and (features, aux) when
    training with aux_logits enabled."""
    if self.transform_input:
        # Re-normalize from (0.5, 0.5) input stats to ImageNet per-channel stats.
        x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
        x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
        x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        x = torch.cat((x_ch0, x_ch1, x_ch2), 1)
    # N x 3 x 299 x 299
    x = self.Conv2d_1a_3x3(x)
    # N x 32 x 149 x 149
    x = self.Conv2d_2a_3x3(x)
    # N x 32 x 147 x 147
    x = self.Conv2d_2b_3x3(x)
    # N x 64 x 147 x 147
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # N x 64 x 73 x 73
    x = self.Conv2d_3b_1x1(x)
    # N x 80 x 73 x 73
    x = self.Conv2d_4a_3x3(x)
    # N x 192 x 71 x 71
    x = F.max_pool2d(x, kernel_size=3, stride=2)
    # N x 192 x 35 x 35
    x = self.Mixed_5b(x)
    # N x 256 x 35 x 35
    x = self.Mixed_5c(x)
    # N x 288 x 35 x 35
    x = self.Mixed_5d(x)
    # N x 288 x 35 x 35
    x = self.Mixed_6a(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6b(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6c(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6d(x)
    # N x 768 x 17 x 17
    x = self.Mixed_6e(x)
    # N x 768 x 17 x 17
    if self.training and self.aux_logits:
        aux = self.AuxLogits(x)
    # N x 768 x 17 x 17
    x = self.Mixed_7a(x)
    # N x 1280 x 8 x 8
    x = self.Mixed_7b(x)
    # N x 2048 x 8 x 8
    x = self.Mixed_7c(x)
    # N x 2048 x 8 x 8
    # Adaptive average pooling
    x = F.adaptive_avg_pool2d(x, (1, 1))
    # N x 2048 x 1 x 1
    x = F.dropout(x, training=self.training)
    # N x 2048 x 1 x 1
    x = x.view(x.size(0), -1)
    # N x 2048
    # x = self.fc(x)
    # N x 1000 (num_classes)
    if self.training and self.aux_logits:
        return x, aux
    return x
|
GHData/chuanli11_WCT-PyTorch/ae.py: 744-788
def forward(self,x):
    """VGG-style decoder: alternating reflection-pad + conv + ReLU stages
    with four upsampling (unpool) steps; the final conv27 has no activation
    so the output is a raw image tensor."""
    # decoder
    out = self.reflecPad15(x)
    out = self.conv15(out)
    out = self.relu15(out)
    out = self.unpool(out)
    out = self.reflecPad16(out)
    out = self.conv16(out)
    out = self.relu16(out)
    out = self.reflecPad17(out)
    out = self.conv17(out)
    out = self.relu17(out)
    out = self.reflecPad18(out)
    out = self.conv18(out)
    out = self.relu18(out)
    out = self.reflecPad19(out)
    out = self.conv19(out)
    out = self.relu19(out)
    out = self.unpool2(out)
    out = self.reflecPad20(out)
    out = self.conv20(out)
    out = self.relu20(out)
    out = self.reflecPad21(out)
    out = self.conv21(out)
    out = self.relu21(out)
    out = self.reflecPad22(out)
    out = self.conv22(out)
    out = self.relu22(out)
    out = self.reflecPad23(out)
    out = self.conv23(out)
    out = self.relu23(out)
    out = self.unpool3(out)
    out = self.reflecPad24(out)
    out = self.conv24(out)
    out = self.relu24(out)
    out = self.reflecPad25(out)
    out = self.conv25(out)
    out = self.relu25(out)
    out = self.unpool4(out)
    out = self.reflecPad26(out)
    out = self.conv26(out)
    out = self.relu26(out)
    out = self.reflecPad27(out)
    out = self.conv27(out)
    return out
|
GHData/DableUTeeF_HiResTorch/hardcodedmodels.py: 199-253
def forward(self, x):
    """Residual trunk with a parallel `path` accumulator: after each layer a
    hard-coded average pool (window 28/14/7 per stage) is added into `path`,
    except at stage boundaries where it is concatenated (growing the channel
    count); `path` — not `out` — is flattened into the final linear head."""
    out = self.conv1(x)
    out = self.bn1(out)
    out = F.relu(out)
    path = F.avg_pool2d(out, 28)
    out = self.layer1_1(out)
    path += F.avg_pool2d(out, 28)
    out = F.relu(out)
    path = F.relu(path)
    out = self.layer1_2(out)
    path += F.avg_pool2d(out, 28)
    out = F.relu(out)
    path = F.relu(path)
    out = self.layer1_3(out)
    # path += F.avg_pool2d(out, 28)
    path = torch.cat((path, F.avg_pool2d(out, 28)), 1)  # channel count grows here
    out = F.relu(out)
    path = F.relu(path)
    out = F.max_pool2d(out, 2)
    out = self.layer2_1(out)
    path += F.avg_pool2d(out, 14)
    out = F.relu(out)
    path = F.relu(path)
    out = self.layer2_2(out)
    path += F.avg_pool2d(out, 14)
    out = F.relu(out)
    path = F.relu(path)
    out = self.layer2_3(out)
    # path += F.avg_pool2d(out, 14)
    path = torch.cat((path, F.avg_pool2d(out, 14)), 1)  # channel count grows here
    out = F.relu(out)
    path = F.relu(path)
    # path += F.avg_pool2d(out, 14)
    out = F.max_pool2d(out, 2)
    out = self.layer3_1(out)
    path += F.avg_pool2d(out, 7)
    out = F.relu(out)
    path = F.relu(path)
    out = self.layer3_2(out)
    path += F.avg_pool2d(out, 7)
    out = F.relu(out)
    path = F.relu(path)
    out = self.layer3_3(out)
    # path = torch.cat((path, F.avg_pool2d(out, 7)), 1)
    path += F.avg_pool2d(out, 7)
    path = F.relu(path)
    # out = F.avg_pool2d(path, 4)
    out = path.view(path.size(0), -1)
    out = self.linear(out)
    return out
|
GHData/pei-hsin-chiu_PyTorchTutorials/vae_2digits_4levels_MSE.py: 107-121
def forward(self, x):
    """Decode a batch of latent vectors into images.

    The flat vector is projected by `fc`, unflattened to (N, capacity, 7, 7),
    upsampled through conv5..conv2 (ReLU, dropout between the first three),
    and squashed into [0, 1] by a sigmoid on the final conv.
    """
    out = self.fc(x)
    # Unflatten: (N, capacity*7*7) -> (N, capacity, 7, 7).
    out = out.view(out.size(0), self.capacity * 1, 7, 7)
    for layer in (self.conv5, self.conv4, self.conv3):
        out = self.dropout(F.relu(layer(out)))
    out = F.relu(self.conv2(out))
    # Last layer: sigmoid keeps pixel values in [0, 1].
    return torch.sigmoid(self.conv1(out))
|
GHData/michaelfedell_Learn_PyTorch/simple_net.py: 18-32
def forward(self, x):
    """LeNet-style forward: two conv+ReLU+max-pool stages, then three dense
    layers (no activation on the output layer)."""
    out = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
    out = F.max_pool2d(F.relu(self.conv2(out)), 2)
    # Flatten everything except the batch dimension for the FC head.
    out = out.view(-1, self.num_flat_features(out))
    out = F.relu(self.fc1(out))
    out = F.relu(self.fc2(out))
    return self.fc3(out)
|
GHData/VictoriousRaptor_sst-clf-torch/TextCNN.py: 29-59
def forward(self, x):
    """Text-CNN forward pass.

    `x` holds token ids of shape (batch, seq_len); each conv produces one
    feature map per kernel size, max-over-time pooling collapses the
    sequence axis, and the pooled features feed a BN + dropout + FC head.
    """
    # (batch, seq) -> (batch, 1, seq, embed): add a channel axis for Conv2d.
    emb = self.word_embeddings(x).unsqueeze(1)
    # One activated feature map per kernel size; drop the width-1 axis.
    feats = [self.relu(conv(emb)).squeeze(3) for conv in self.convs]
    # Max-over-time pooling: keep the strongest response per filter.
    pooled = [F.max_pool1d(f, f.size()[2]).squeeze(2) for f in feats]
    merged = torch.cat(pooled, dim=1)       # (batch, kernel_num * len(Ks))
    merged = self.dropout(self.bn1(merged))
    return self.softmax(self.fc(merged))
|
GHData/AdarshK1_meam517_final/model.py: 125-147
def forward(self, map):
    """Shared two-conv trunk feeding three independent FC heads.

    Returns a (u1, u2, u3) tuple; each head is fc->ReLU, fc->ReLU, fc.
    """
    feat = F.relu(self.conv2(F.relu(self.conv1(map))))
    feat = torch.flatten(feat, start_dim=1)
    heads = []
    for fc_a, fc_b, fc_c in ((self.fcn_1_u1, self.fcn_2_u1, self.fcn_3_u1),
                             (self.fcn_1_u2, self.fcn_2_u2, self.fcn_3_u2),
                             (self.fcn_1_u3, self.fcn_2_u3, self.fcn_3_u3)):
        heads.append(fc_c(F.relu(fc_b(F.relu(fc_a(feat))))))
    return tuple(heads)
|
GHData/agrechnev_torch-fun1/my_cif10.py: 51-78
def forward(self, x):
    """VGG-ish CIFAR net: three (conv, conv, dropout, pool) stages, two FC
    layers, log-softmax output."""
    out = x
    for conv_a, conv_b, drop in ((self.conv1_1, self.conv1_2, self.dropout1),
                                 (self.conv2_1, self.conv2_2, self.dropout2),
                                 (self.conv3_1, self.conv3_2, self.dropout3)):
        out = drop(F.relu(conv_b(F.relu(conv_a(out)))))
        out = F.max_pool2d(out, 2)
    out = torch.flatten(out, 1)
    # x = self.dropout_f1(x)  # (disabled in the original)
    out = self.fc2(F.relu(self.fc1(out)))
    return F.log_softmax(out, dim=1)
########################################################################################################################
|
GHData/omerunlusoy_Computer-Vision-Practices-with-PyTorch/CNN_Hyperparameter_Tuning.py: 187-200
def forward(self, x):
    """Three conv+ReLU+max-pool stages, then fc1 (+dropout) and a linear fc2.

    Relies on module-level globals `pooling_kernel_size` and `fc1_input_size`
    defined elsewhere in this file.  Returns raw class scores (logits).
    """
    x = Functional.relu(self.conv1(x))  # activation function is relu rather than sigmoid
    x = Functional.max_pool2d(x, pooling_kernel_size, pooling_kernel_size)
    x = Functional.relu(self.conv2(x))
    x = Functional.max_pool2d(x, pooling_kernel_size, pooling_kernel_size)
    x = Functional.relu(self.conv3(x))
    x = Functional.max_pool2d(x, pooling_kernel_size, pooling_kernel_size)
    x = x.view(-1, fc1_input_size)  # x must be flattened before entering fully connected layer
    x = Functional.relu(self.fc1(x))
    x = self.dropout1(x)
    x = self.fc2(x)  # rather than the probability, we get score (raw output) for nn.CrossEntropyLoss
    return x
|
GHData/ShaoQiBNU_pyTorch_MNIST/pytorch_MNIST.py: 38-50
def forward(self, x):
    """LeNet-style MNIST net: conv+pool twice, two FC layers, log-softmax."""
    out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
    out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
    # fc1 expects the fixed 4x4x50 flattened feature size.
    out = out.view(-1, 4 * 4 * 50)
    out = self.fc2(F.relu(self.fc1(out)))
    return F.log_softmax(out, dim=1)
################# train #################
|
GHData/mickymicmouse_v_OCGAN_torch/networks.py: 81-96
def forward(self, input, args):
    """DCGAN-style generator: reshape the latent vector to (N, latent, 1, 1),
    run three conv+BN(+dropout) blocks with ReLU, and tanh the last conv."""
    z = input.view(input.size(0), args.latent, 1, 1)
    h = F.relu(self.batch_norm_1(self.conv1(z)))
    h = F.relu(self.batch_norm_2(self.conv2(h)))
    h = self.batch_norm_3(self.conv3(h))
    # Dropout before the last activation, as in the original ordering.
    h = F.relu(self.dropout(h))
    return torch.tanh(self.conv4(h))
|
GHData/gdelacruzfdez_blink-detection-torch/evaluator.py: 332-346
def calculate_blinks_for_video(self, video_dataframe):
    """Return cleaned blink lists for one video.

    Both the annotated ground truth and the model predictions are converted
    to blinks, split into partial/complete, and run through the same
    cleanup: drop non-visible blinks, then merge double blinks.

    Returns:
        (partial_gt, complete_gt, partial_pred, complete_pred)
    """
    def cleaned(blinks):
        # Shared post-processing applied to every blink list.
        return self.merge_double_blinks(self.delete_non_visible_blinks(blinks))

    gt_partial, gt_complete = self.divide_partial_and_full_blinks(
        self.convert_annotation_to_blinks(video_dataframe))
    gt_partial = cleaned(gt_partial)
    gt_complete = cleaned(gt_complete)
    pred_partial, pred_complete = self.divide_partial_and_full_blinks(
        self.convert_predictions_to_blinks(video_dataframe))
    pred_partial = cleaned(pred_partial)
    pred_complete = cleaned(pred_complete)
    return gt_partial, gt_complete, pred_partial, pred_complete
|
GHData/abdxxw_Bit-wise-training-on-PyTorch/models.py: 54-73
def forward(self, x):
    """Two conv+ReLU+max-pool stages followed by a three-layer FC head
    (linear output for use with a cross-entropy-style loss)."""
    out = F.max_pool2d(F.relu(self.conv1(x)), 2)
    out = F.max_pool2d(F.relu(self.conv2(out)), 2)
    out = out.view(out.size(0), -1)          # flatten per sample
    out = F.relu(self.fc2(F.relu(self.fc1(out))))
    return self.fc3(out)
###################################################################################################
###################################################################################################
# ResNet for CIFAR dataset
###################################################################################################
###################################################################################################
|
GHData/sihamdmostafa_Bit-wise-training-on-PyTorch/models.py: 54-73
def forward(self, x):
    """Small CNN classifier: conv/pool x2, flatten, fc1-fc2 with ReLU,
    linear fc3 output."""
    h = F.relu(self.conv1(x))
    h = F.max_pool2d(h, 2)
    h = F.relu(self.conv2(h))
    h = F.max_pool2d(h, 2)
    flat = h.view(h.size(0), -1)
    hidden = F.relu(self.fc1(flat))
    hidden = F.relu(self.fc2(hidden))
    return self.fc3(hidden)
###################################################################################################
###################################################################################################
# ResNet for CIFAR dataset
###################################################################################################
###################################################################################################
|
GHData/ajinkya98_TensorBoard_PyTorch/model.py: 27-39
def forward(self, x):
    """Conv/pool twice, flatten, two hidden FC layers, linear output layer."""
    h = F.relu(self.conv1(x))
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    h = F.relu(self.conv2(h))
    h = F.max_pool2d(h, kernel_size=2, stride=2)
    flat = torch.flatten(h, start_dim=1)
    hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
    return self.out(hidden)
#Loading datasets
|
GHData/trisha025_Deep-Learning-with-PyTorch/training_CNN_loop.py: 28-45
def forward(self, t):
    """Forward pass: two conv/pool stages, then fc1 -> fc2 -> output layer."""
    h = F.max_pool2d(F.relu(self.conv1(t)), kernel_size=2, stride=2)
    h = F.max_pool2d(F.relu(self.conv2(h)), kernel_size=2, stride=2)
    # fc1 expects a fixed 12*4*4 flattened feature size.
    h = h.reshape(-1, 12 * 4 * 4)
    h = F.relu(self.fc1(h))
    h = F.relu(self.fc2(h))
    return self.out(h)
#training set
|
GHData/alejands_torchEGClass/egclass.py: 67-78
def forward(self, x):
    """Four conv layers (pooling after the 2nd and 4th), then 3 FC layers."""
    h = F.relu(self.conv1(x))
    h = F.max_pool2d(F.relu(self.conv2(h)), (2, 2))
    h = F.relu(self.conv3(h))
    h = F.max_pool2d(F.relu(self.conv4(h)), (2, 2))
    # Flatten all non-batch dimensions.
    h = h.view(h.size()[0], -1)
    h = F.relu(self.fc2(F.relu(self.fc1(h))))
    return self.fc3(h)
|
GHData/polasha_CNN-with-PyTorch-on-MNIST/Convolution%20neural%20network%20with%20pytorch.py: 40-50
def forward(self, X):
    """MNIST classifier: conv/pool x2, three FC layers, log-probabilities out."""
    h = F.max_pool2d(F.relu(self.conv1(X)), 2, 2)
    h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)
    h = h.view(-1, 5 * 5 * 16)       # fc1 expects 400 flattened features
    h = F.relu(self.fc2(F.relu(self.fc1(h))))
    return F.log_softmax(self.fc3(h), dim=1)
|
GHData/polasha_Convolution-Neural-Network_PyTorch_CIFAR-Dataset/Convolution%20neural%20network%20for%20color%20image%20with%20CIFAR%20dataset_PyTorch.py: 67-80
def forward(self, X):
    """CIFAR classifier: conv + 2x2/stride-2 pooling twice, three FC layers,
    log-probabilities out."""
    h = F.max_pool2d(F.relu(self.conv1(X)), 2, 2)
    h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)
    h = h.view(-1, 6 * 6 * 16)       # fc1 expects 576 flattened features
    h = F.relu(self.fc2(F.relu(self.fc1(h))))
    return F.log_softmax(self.fc3(h), dim=1)
|
GHData/asdf93074_mnistFashionPyTorch/net.py: 18-28
def forward(self, x):
    """Fashion-MNIST net; casts input to float32 first, then conv/pool x2,
    fc1 with dropout (active only in training mode), log-softmax output."""
    h = x.type('torch.FloatTensor')
    h = self.pool(F.relu(self.conv1(h)))
    h = self.pool(F.relu(self.conv2(h)))
    h = h.view(-1, 64 * 7 * 7)
    h = F.relu(self.fc1(h))
    # Dropout obeys the module's train/eval flag.
    h = F.dropout(h, training=self.training)
    return F.log_softmax(self.fc2(h), dim=1)
|
GHData/polasha_Convolution-Neural-Network_PyTorch_Real-Images-datasets/CNN_Real%20Image%20Data_PyTorch.py: 325-339
def forward(self, X):
    """Image classifier: conv/pool x2, three FC layers, log-probabilities."""
    h = F.max_pool2d(F.relu(self.conv1(X)), 2, 2)
    h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)
    h = h.view(-1, 54 * 54 * 16)     # fc1 expects 46656 flattened features
    h = F.relu(self.fc2(F.relu(self.fc1(h))))
    return F.log_softmax(self.fc3(h), dim=1)
#Define loss and optimization calculation
|
GHData/frozenparadox99_pyTorch-Course/cifar.py: 63-75
def forward(self, x):
    """Three conv/pool stages, flatten, fc1 + dropout, linear fc2 output."""
    h = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
    h = F.max_pool2d(F.relu(self.conv2(h)), 2, 2)
    h = F.max_pool2d(F.relu(self.conv3(h)), 2, 2)
    h = h.view(-1, 4 * 4 * 64)
    h = self.dropout1(F.relu(self.fc1(h)))
    return self.fc2(h)
|
GHData/MayankSingal_PyTorch-Zero-Shot-Super-Resolution/net.py: 21-35
def forward(self, LR_img):
    """ZSSR-style residual net: seven ReLU convs plus a linear eighth conv
    learn a residual that is added back onto the low-resolution input."""
    res = LR_img
    for conv in (self.Conv1, self.Conv2, self.Conv3, self.Conv4,
                 self.Conv5, self.Conv6, self.Conv7):
        res = self.relu(conv(res))
    res = self.Conv8(res)
    # The net predicts residuals only, so add the input back.
    return LR_img + res
|
GHData/Lornatang_CARN-PyTorch/model.py: 65-80
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Cascading residual forward pass.

    Each stage concatenates new residual-block features with the running
    tensor and fuses the widened result with a conv.  Note the first concat
    puts the block output *before* the input, while later concats append the
    block output after the running tensor (kept as in the original).
    """
    b1 = self.rb1(x)
    cat1 = torch.cat([b1, x], 1)
    fused1 = self.conv1(cat1)
    b2 = self.rb2(fused1)
    cat2 = torch.cat([cat1, b2], 1)
    fused2 = self.conv2(cat2)
    b3 = self.rb3(fused2)
    cat3 = torch.cat([cat2, b3], 1)
    return self.conv3(cat3)
|
GHData/rowantseng_FUnIE-GAN-PyTorch/datasets.py: 16-33
def augment(dt_im, eh_im):
    """Paired data augmentation for (distorted, enhanced) image pairs.

    The distorted image is first blended toward the enhanced one by a random
    factor, then both images get the same random left-right and up-down
    flips (each with probability 0.25).
    """
    # Random interpolation between the two images.
    alpha = random.random()
    dt_im = dt_im * alpha + eh_im * (1 - alpha)
    # Random horizontal flip, applied to both.
    if random.random() < 0.25:
        dt_im, eh_im = np.fliplr(dt_im), np.flipud(eh_im[::-1, ::-1])
    # Random vertical flip, applied to both.
    if random.random() < 0.25:
        dt_im, eh_im = np.flipud(dt_im), np.flipud(eh_im)
    return dt_im, eh_im
|
GHData/jweig0ld_VAE/vae_skeleton.py: 242-254
def forward(self, state):
    """VAE forward: encode, reparameterize, transition, decode.

    Returns (logits, probs, mu, logvar); the decoder output is cropped if
    it overshoots the expected (in_channels, x_dim, x_dim) shape.
    """
    hidden = self.encoder(state)
    mu = self.mu_layer(hidden)
    logvar = self.logvar_layer(hidden)
    latent = self.transition_layer(self.reparameterize(mu, logvar))
    y = self.decoder(latent)
    if y.shape[1:] != (self.in_channels, self.x_dim, self.x_dim):
        # Crop excess channels / rows / cols so downstream shapes line up.
        y = y[:, :self.in_channels, :self.x_dim, :self.x_dim]
    return y, torch.sigmoid(y), mu, logvar
|
GHData/Lornatang_EDSR-PyTorch/model.py: 87-101
def _forward_impl(self, x: torch.Tensor) -> torch.Tensor:
    """Core EDSR forward: normalize, residual trunk, upsample, de-normalize.

    NOTE(review): sub_/mul_/div_/add_ are in-place, so the caller's input
    tensor is mutated by the normalization step -- confirm this is intended.
    """
    # The images by subtracting the mean RGB value of the DIV2K dataset.
    out = x.sub_(self.mean).mul_(255.)
    out1 = self.conv1(out)
    out = self.trunk(out1)
    out = self.conv2(out)
    out = torch.add(out, out1)   # global residual connection around the trunk
    out = self.upsampling(out)
    out = self.conv3(out)
    # Undo the normalization to return to image value range.
    out = out.div_(255.).add_(self.mean)
    return out
|
GHData/TheRealBaka_Models-in-PyTorch/unet.py: 69-103
def forward(self, image):
    """U-Net forward pass.

    Encoder: five double-conv stages with 2x2 max-pooling between them.
    Decoder: four transpose-conv upsamplings; each is concatenated with the
    cropped matching encoder feature map (skip connection) before a double
    conv.  Leftover debug ``print`` calls were removed.
    """
    # encoder
    x1 = self.down_conv_1(image)
    x2 = self.max_pool_2x2(x1)
    x3 = self.down_conv_2(x2)
    x4 = self.max_pool_2x2(x3)
    x5 = self.down_conv_3(x4)
    x6 = self.max_pool_2x2(x5)
    x7 = self.down_conv_4(x6)
    x8 = self.max_pool_2x2(x7)
    x9 = self.down_conv_5(x8)
    # decoder: upsample, crop the skip connection to match, concat, double conv
    x = self.up_trans_1(x9)
    x = self.up_conv_1(torch.cat([x, crop_img(x7, x)], 1))
    x = self.up_trans_2(x)
    x = self.up_conv_2(torch.cat([x, crop_img(x5, x)], 1))
    x = self.up_trans_3(x)
    x = self.up_conv_3(torch.cat([x, crop_img(x3, x)], 1))
    x = self.up_trans_4(x)
    x = self.up_conv_4(torch.cat([x, crop_img(x1, x)], 1))
    return self.out(x)
|
GHData/Sudhanshu1304_Pix2Pix-In-PyTorch/unet.py: 103-141
def forward(self, x):
    """U-Net generator forward pass.

    Encoder conv_i/dconv_i pairs downsample; the bottleneck ('neck') is then
    upsampled, and at every level the upsampled tensor is concatenated with a
    cropped encoder feature map (skip connection) before the decoder conv.
    If self.retain is truthy, the output is resized back to the input's HxW.
    Fix: ``F.sigmoid`` is deprecated -> ``torch.sigmoid`` (same function);
    ``== True`` comparison replaced by a plain truthiness test.
    """
    conv1 = self.conv1(x)
    pool1 = self.dconv1(conv1)
    conv2 = self.conv2(pool1)
    pool2 = self.dconv2(conv2)
    conv3 = self.conv3(pool2)
    pool3 = self.dconv3(conv3)
    conv4 = self.conv4(pool3)
    pool4 = self.dconv4(conv4)
    neck = self.neck(pool4)
    up4 = self.upconv4(neck)
    dec4 = self.Dconv4(torch.cat([up4, self.crop(conv4, up4)], 1))
    up3 = self.upconv3(dec4)
    dec3 = self.Dconv3(torch.cat([up3, self.crop(conv3, up3)], 1))
    up2 = self.upconv2(dec3)
    dec2 = self.Dconv2(torch.cat([up2, self.crop(conv2, up2)], 1))
    up1 = self.upconv1(dec2)
    dec1 = self.Dconv1(torch.cat([up1, self.crop(conv1, up1)], 1))
    out = self.out(dec1)
    if self.retain:
        # Restore the input spatial size when padding was not retained.
        out = F.interpolate(out, list(x.shape)[2:])
    return torch.sigmoid(out)
|
GHData/DARK-art108_Unet-Torch-Implementation/unet.py: 63-91
def forward(self, image):
    """U-Net forward pass (spatial sizes in the original comments assume a
    572x572 input: 568 -> 284 -> 280 -> ... -> 28 along the encoder)."""
    # --- contracting path: double conv, then 2x2 max pool ---
    skip1 = self.down_conv_1(image)
    skip2 = self.down_conv_2(self.max_pool_1(skip1))
    skip3 = self.down_conv_3(self.max_pool_1(skip2))
    skip4 = self.down_conv_4(self.max_pool_1(skip3))
    bottom = self.down_conv_5(self.max_pool_1(skip4))
    # --- expanding path: upsample, concat cropped skip, double conv ---
    up = self.up_trans_1(bottom)
    up = self.up_conv_1(torch.cat([up, crop_tensor(skip4, up)], dim=1))
    up = self.up_trans_2(up)
    up = self.up_conv_2(torch.cat([up, crop_tensor(skip3, up)], dim=1))
    up = self.up_trans_3(up)
    up = self.up_conv_3(torch.cat([up, crop_tensor(skip2, up)], dim=1))
    up = self.up_trans_4(up)
    up = self.up_conv_4(torch.cat([up, crop_tensor(skip1, up)], dim=1))
    return self.out(up)
|
GHData/Sudhanshu1304_Unet-In-PyTorch/UNet1.py: 103-141
def forward(self, x):
    """U-Net forward pass.

    Encoder conv_i/dconv_i pairs downsample; the bottleneck ('neck') is then
    upsampled, and at each level the upsampled tensor is concatenated with a
    cropped encoder feature map (skip connection) before the decoder conv.
    If self.retain is truthy, the output is resized back to the input's HxW.
    Fix: ``F.sigmoid`` is deprecated -> ``torch.sigmoid`` (same function);
    ``== True`` comparison replaced by a plain truthiness test.
    """
    conv1 = self.conv1(x)
    pool1 = self.dconv1(conv1)
    conv2 = self.conv2(pool1)
    pool2 = self.dconv2(conv2)
    conv3 = self.conv3(pool2)
    pool3 = self.dconv3(conv3)
    conv4 = self.conv4(pool3)
    pool4 = self.dconv4(conv4)
    neck = self.neck(pool4)
    up4 = self.upconv4(neck)
    dec4 = self.Dconv4(torch.cat([up4, self.crop(conv4, up4)], 1))
    up3 = self.upconv3(dec4)
    dec3 = self.Dconv3(torch.cat([up3, self.crop(conv3, up3)], 1))
    up2 = self.upconv2(dec3)
    dec2 = self.Dconv2(torch.cat([up2, self.crop(conv2, up2)], 1))
    up1 = self.upconv1(dec2)
    dec1 = self.Dconv1(torch.cat([up1, self.crop(conv1, up1)], 1))
    out = self.out(dec1)
    if self.retain:
        # Restore the input spatial size when padding was not retained.
        out = F.interpolate(out, list(x.shape)[2:])
    return torch.sigmoid(out)
|
GHData/M-Daniyal-123_UNet-PyTorch/unet.py: 76-116
def forward(self, image):
    """U-Net forward pass.

    Expects ``image`` of size (batch, channels, 572, 572); the final output
    is (batch, 2, 388, 388) per the original author's notes.
    """
    # Encoder: five double-conv stages separated by 2x2 max pools.
    enc1 = self.down_conv_1(image)
    enc2 = self.down_conv_2(self.max_pool_2x2(enc1))
    enc3 = self.down_conv_3(self.max_pool_2x2(enc2))
    enc4 = self.down_conv_4(self.max_pool_2x2(enc3))
    enc5 = self.down_conv_5(self.max_pool_2x2(enc4))
    # Decoder: upsample, concat cropped skip connection, double conv.
    dec = self.up_trans_1(enc5)
    dec = self.up_conv_1(torch.cat([dec, crop_image(enc4, dec)], 1))
    dec = self.up_trans_2(dec)
    dec = self.up_conv_2(torch.cat([dec, crop_image(enc3, dec)], 1))
    dec = self.up_trans_3(dec)
    dec = self.up_conv_3(torch.cat([dec, crop_image(enc2, dec)], 1))
    dec = self.up_trans_4(dec)
    dec = self.up_conv_4(torch.cat([dec, crop_image(enc1, dec)], 1))
    return self.out(dec)
|
GHData/vence-andersen_UNet-PyTorch/unet.py: 69-101
def forward(self, image):
    """U-Net forward: five-stage encoder with pooling, four-stage decoder
    with cropped skip connections, final 1x1 output conv."""
    # encoder
    enc1 = self.down_conv_1(image)
    enc2 = self.down_conv_2(self.max_pool2(enc1))
    enc3 = self.down_conv_3(self.max_pool2(enc2))
    enc4 = self.down_conv_4(self.max_pool2(enc3))
    enc5 = self.down_conv_5(self.max_pool2(enc4))
    # decoder
    dec = self.up_trans_1(enc5)
    dec = self.up_conv_1(torch.cat([dec, crop_img(enc4, dec)], 1))
    dec = self.up_trans_2(dec)
    dec = self.up_conv_2(torch.cat([dec, crop_img(enc3, dec)], 1))
    dec = self.up_trans_3(dec)
    dec = self.up_conv_3(torch.cat([dec, crop_img(enc2, dec)], 1))
    dec = self.up_trans_4(dec)
    dec = self.up_conv_4(torch.cat([dec, crop_img(enc1, dec)], 1))
    return self.out_conv(dec)
|
GHData/chuanli11_WCT-PyTorch/ae.py: 598-645
def forward(self,x):
    """VGG-style encoder forward (conv1..conv14 with reflection padding).

    NOTE(review): the pooling indices (pool_idx..pool_idx4) returned by the
    max-pool layers are captured but never used here -- presumably consumed
    by a matching unpooling decoder elsewhere; confirm before removing.
    """
    out = self.conv1(x)
    out = self.reflecPad1(out)
    out = self.conv2(out)
    out = self.relu2(out)
    out = self.reflecPad3(out)
    out = self.conv3(out)
    out = self.relu3(out)
    # --- downsampling stage 1 ---
    out,pool_idx = self.maxPool(out)
    out = self.reflecPad4(out)
    out = self.conv4(out)
    out = self.relu4(out)
    out = self.reflecPad5(out)
    out = self.conv5(out)
    out = self.relu5(out)
    # --- downsampling stage 2 ---
    out,pool_idx2 = self.maxPool2(out)
    out = self.reflecPad6(out)
    out = self.conv6(out)
    out = self.relu6(out)
    out = self.reflecPad7(out)
    out = self.conv7(out)
    out = self.relu7(out)
    out = self.reflecPad8(out)
    out = self.conv8(out)
    out = self.relu8(out)
    out = self.reflecPad9(out)
    out = self.conv9(out)
    out = self.relu9(out)
    # --- downsampling stage 3 ---
    out,pool_idx3 = self.maxPool3(out)
    out = self.reflecPad10(out)
    out = self.conv10(out)
    out = self.relu10(out)
    out = self.reflecPad11(out)
    out = self.conv11(out)
    out = self.relu11(out)
    out = self.reflecPad12(out)
    out = self.conv12(out)
    out = self.relu12(out)
    out = self.reflecPad13(out)
    out = self.conv13(out)
    out = self.relu13(out)
    # --- downsampling stage 4 ---
    out,pool_idx4 = self.maxPool4(out)
    out = self.reflecPad14(out)
    out = self.conv14(out)
    out = self.relu14(out)
    return out
|
GHData/Jiangshan00001_lanenet_test/ENet.py: 362-430
def forward(self, x):
    """Two-branch lane-detection ENet forward pass.

    A shared encoder (initial block + stages 1-2) feeds two stage-3 encoder
    branches: 'b_*' layers produce the binary segmentation mask and 'e_*'
    layers the pixel-embedding branch.  Each branch is decoded with
    max-unpooling, reusing the indices saved by the shared downsampling
    layers, back up to the input resolution.

    Returns:
        (binary_final_logits, instance_notfinal_logits)
    """
    # TODO
    # Initial block
    ##256x512
    input_size = x.size()
    ##batch_size, 16, 128x256
    x = self.initial_block(x)
    # Stage 1 - Encoder-share
    ##64x128
    stage1_input_size = x.size()
    # Indices from the downsampling max-pool are kept for unpooling later.
    x, max_indices1_0 = self.downsample1_0(x)
    #->2,64,64,128
    x = self.regular1_1(x)
    x = self.regular1_2(x)
    x = self.regular1_3(x)
    x = self.regular1_4(x)
    # Stage 2 - Encoder -share
    ##2,128,32,64
    stage2_input_size = x.size()
    x, max_indices2_0 = self.downsample2_0(x)
    x = self.regular2_1(x)
    x = self.dilated2_2(x)
    x = self.asymmetric2_3(x)
    x = self.dilated2_4(x)
    x = self.regular2_5(x)
    x = self.dilated2_6(x)
    x = self.asymmetric2_7(x)
    x = self.dilated2_8(x)
    # Stage 3 - Encoder
    ##2,128, 32x64
    # Binary-mask branch encoder.
    b_x = self.b_regular3_0(x)
    b_x = self.b_dilated3_1(b_x)
    b_x = self.b_asymmetric3_2(b_x)
    b_x = self.b_dilated3_3(b_x)
    b_x = self.b_regular3_4(b_x)
    b_x = self.b_dilated3_5(b_x)
    b_x = self.b_asymmetric3_6(b_x)
    b_x = self.b_dilated3_7(b_x)
    # Embedding branch encoder (same topology, separate weights).
    e_x = self.e_regular3_0(x)
    e_x = self.e_dilated3_1(e_x)
    e_x = self.e_asymmetric3_2(e_x)
    e_x = self.e_dilated3_3(e_x)
    e_x = self.e_regular3_4(e_x)
    e_x = self.e_dilated3_5(e_x)
    e_x = self.e_asymmetric3_6(e_x)
    e_x = self.e_dilated3_7(e_x)
    # binary branch 2,64,64,128
    x_binary = self.upsample_binary_4_0(b_x, max_indices2_0, output_size=stage2_input_size)
    x_binary = self.regular_binary_4_1(x_binary)
    x_binary = self.regular_binary_4_2(x_binary)
    x_binary = self.upsample_binary_5_0(x_binary, max_indices1_0, output_size=stage1_input_size)# 2,16,128,256
    x_binary = self.regular_binary_5_1(x_binary)
    binary_final_logits = self.binary_transposed_conv(x_binary, output_size=input_size)#2,1,256,512
    # embedding branch
    x_embedding = self.upsample_embedding_4_0(e_x, max_indices2_0, output_size=stage2_input_size)
    x_embedding = self.regular_embedding_4_1(x_embedding)
    x_embedding = self.regular_embedding_4_2(x_embedding)
    x_embedding = self.upsample_embedding_5_0(x_embedding, max_indices1_0, output_size=stage1_input_size)
    x_embedding = self.regular_embedding_5_1(x_embedding)
    instance_notfinal_logits = self.embedding_transposed_conv(x_embedding, output_size=input_size)
    return binary_final_logits, instance_notfinal_logits
|
GHData/1zuu_Deep-Noise-Suppression-using-GANs-PyTorch/generator.py: 121-208
def forward(self, x, z):
    """
    Forward pass of generator.
    Args:
        x: input batch (signal)
        z: latent vector
    Returns:
        The decoder's final activation (denoised signal batch).
    """
    # --- encoder: 11 stages; pre-activation outputs are kept for skips ---
    enc1 = self.encoder1(x)
    enc1_act = self.encoder1_act(enc1)
    enc2 = self.encoder2(enc1_act)
    enc2_act = self.encoder2_act(enc2)
    enc3 = self.encoder3(enc2_act)
    enc3_act = self.encoder3_act(enc3)
    enc4 = self.encoder4(enc3_act)
    enc4_act = self.encoder4_act(enc4)
    enc5 = self.encoder5(enc4_act)
    enc5_act = self.encoder5_act(enc5)
    enc6 = self.encoder6(enc5_act)
    enc6_act = self.encoder6_act(enc6)
    enc7 = self.encoder7(enc6_act)
    enc7_act = self.encoder7_act(enc7)
    enc8 = self.encoder8(enc7_act)
    enc8_act = self.encoder8_act(enc8)
    enc9 = self.encoder9(enc8_act)
    enc9_act = self.encoder9_act(enc9)
    enc10 = self.encoder10(enc9_act)
    enc10_act = self.encoder10_act(enc10)
    enc11 = self.encoder11(enc10_act)
    enc11_act = self.encoder11_act(enc11)
    # Bottleneck: concatenate the latent code with the deepest features.
    encoder_out = torch.cat((enc11_act, z), dim=1)
    # --- decoder: each stage concatenates the matching encoder skip
    # *before* its activation is applied ---
    dec10 = self.decoder10(encoder_out)
    dec10_cat = torch.cat((dec10, enc10), dim=1)
    dec10_act = self.decoder10_act(dec10_cat)
    dec9 = self.decoder9(dec10_act)
    dec9_cat = torch.cat((dec9, enc9), dim=1)
    dec9_act = self.decoder9_act(dec9_cat)
    dec8 = self.decoder8(dec9_act)
    dec8_cat = torch.cat((dec8, enc8), dim=1)
    dec8_act = self.decoder8_act(dec8_cat)
    dec7 = self.decoder7(dec8_act)
    dec7_cat = torch.cat((dec7, enc7), dim=1)
    dec7_act = self.decoder7_act(dec7_cat)
    dec6 = self.decoder6(dec7_act)
    dec6_cat = torch.cat((dec6, enc6), dim=1)
    dec6_act = self.decoder6_act(dec6_cat)
    dec5 = self.decoder5(dec6_act)
    dec5_cat = torch.cat((dec5, enc5), dim=1)
    dec5_act = self.decoder5_act(dec5_cat)
    dec4 = self.decoder4(dec5_act)
    dec4_cat = torch.cat((dec4, enc4), dim=1)
    dec4_act = self.decoder4_act(dec4_cat)
    dec3 = self.decoder3(dec4_act)
    dec3_cat = torch.cat((dec3, enc3), dim=1)
    dec3_act = self.decoder3_act(dec3_cat)
    dec2 = self.decoder2(dec3_act)
    dec2_cat = torch.cat((dec2, enc2), dim=1)
    dec2_act = self.decoder2_act(dec2_cat)
    dec1 = self.decoder1(dec2_act)
    dec1_cat = torch.cat((dec1, enc1), dim=1)
    dec1_act = self.decoder1_act(dec1_cat)
    dec_final = self.decoder_out(dec1_act)
    output = self.decoder_out_act(dec_final)
    return output
|
GHData/DableUTeeF_HiResTorch/hardcodedmodels.py: 271-315
def forward(self, x):
    """Forward pass with an auxiliary globally-pooled 'path' branch (variant
    applying ReLU to 'out' between blocks but to 'path' only at the end).

    'path' accumulates average-pooled block outputs: same-width blocks add
    in place, stage boundaries concatenate to widen.  The classifier reads
    'path' only.  NOTE(review): pooling windows 28/14/7 assume fixed input
    spatial sizes -- confirm against the target dataset.
    """
    out = self.conv1(x)
    out = self.bn1(out)
    out = F.relu(out)
    # Seed the side branch with the stem's pooled activations.
    path = F.avg_pool2d(out, 28)
    # --- stage 1 ---
    out = self.layer1_1(out)
    path += F.avg_pool2d(out, 28)
    out = F.relu(out)
    out = self.layer1_2(out)
    path += F.avg_pool2d(out, 28)
    out = F.relu(out)
    out = self.layer1_3(out)
    # path += F.avg_pool2d(out, 28)
    path = torch.cat((path, F.avg_pool2d(out, 28)), 1)
    out = F.relu(out)
    out = F.max_pool2d(out, 2)
    # --- stage 2 ---
    out = self.layer2_1(out)
    path += F.avg_pool2d(out, 14)
    out = F.relu(out)
    out = self.layer2_2(out)
    path += F.avg_pool2d(out, 14)
    out = F.relu(out)
    out = self.layer2_3(out)
    # path += F.avg_pool2d(out, 14)
    path = torch.cat((path, F.avg_pool2d(out, 14)), 1)
    out = F.relu(out)
    # path += F.avg_pool2d(out, 14)
    out = F.max_pool2d(out, 2)
    # --- stage 3 ---
    out = self.layer3_1(out)
    path += F.avg_pool2d(out, 7)
    out = F.relu(out)
    out = self.layer3_2(out)
    path += F.avg_pool2d(out, 7)
    out = F.relu(out)
    out = self.layer3_3(out)
    # path = torch.cat((path, F.avg_pool2d(out, 7)), 1)
    path += F.avg_pool2d(out, 7)
    path = F.relu(path)
    # out = F.avg_pool2d(path, 4)
    # Classify from the accumulated side branch only.
    out = path.view(path.size(0), -1)
    out = self.linear(out)
    return out
|
GHData/AdarshK1_meam517_final/model.py: 88-103
def forward(self, map):
    """Two-ReLU-conv trunk, flatten, then six FC layers with a sigmoid on
    the last one.

    Fix: ``F.sigmoid`` is deprecated in PyTorch -> ``torch.sigmoid`` (the
    same function, so behavior is unchanged).
    """
    feat = F.relu(self.conv1(map))
    feat = F.relu(self.conv2(feat))
    out = torch.flatten(feat, start_dim=1)
    out = F.relu(self.fcn_1(out))
    out = F.relu(self.fcn_2(out))
    out = F.relu(self.fcn_3(out))
    out = F.relu(self.fcn_4(out))
    out = F.relu(self.fcn_5(out))
    return torch.sigmoid(self.fcn_6(out))
|
GHData/atranitell_Synchronized-BatchNorm-PyTorch-Horovod/sync_bn_horovod.py: 75-86
def forward(self, x):
    """Conv-BN-ReLU x2 with pooling, then a three-layer FC head."""
    h = F.max_pool2d(F.relu(self.bn(self.conv1(x))), 2)
    h = F.max_pool2d(F.relu(self.bn1(self.conv2(h))), 2)
    h = h.view(h.size(0), -1)        # flatten per sample
    h = F.relu(self.fc2(F.relu(self.fc1(h))))
    return self.fc3(h)
|
GHData/koptelovmax_PyTorch-DL/autoencoder.py: 28-39
def forward(self, x):
    """Run the 4-layer encoder then the 4-layer decoder, ReLU after every
    layer (including the last)."""
    h = x
    for layer in (self.encL1, self.encL2, self.encL3, self.encL4,
                  self.decL1, self.decL2, self.decL3, self.decL4):
        h = F.relu(layer(h))
    return h
#%%
# Define a model and an optimizer:
|
GHData/Lornatang_DRLN-PyTorch/model.py: 118-133
def forward(self, x: torch.Tensor) -> torch.Tensor:
    """Densely-cascaded residual blocks with Laplacian attention.

    Each residual layer's output is appended channel-wise to the running
    concatenation of everything before it; the widened tensor is then fused
    by conv_layer and reweighted by the Laplacian attention layer.
    """
    cascade = x
    for layer in (self.residual_layer1, self.residual_layer2, self.residual_layer3):
        cascade = torch.cat([cascade, layer(cascade)], 1)
    fused = self.conv_layer(cascade)
    return self.laplacian_attention_layer(fused)
|
GHData/Lornatang_RCAN-PyTorch/model.py: 67-81
def forward(self, x: Tensor) -> Tensor:
    """RCAN forward: mean-shift, residual trunk, upsample, inverse mean-shift.

    NOTE(review): sub_/mul_/div_/add_ operate in place, mutating the
    caller's input tensor; also mul_(1.) and div_(1.) are no-ops apparently
    kept for symmetry with a 255-scaling variant -- confirm before changing.
    """
    x = x.sub_(self.mean).mul_(1.)
    conv1 = self.conv1(x)
    x = self.trunk(conv1)
    x = self.conv2(x)
    x = torch.add(x, conv1)   # long skip connection around the trunk
    x = self.upsampling(x)
    x = self.conv3(x)
    x = x.div_(1.).add_(self.mean)
    return x
|
GHData/DableUTeeF_HiResTorch/hardcodedmodels.py: 88-126
def forward(self, x):
    """Forward pass with an auxiliary globally-pooled 'path' branch (variant
    with no inter-block ReLUs on either branch).

    'path' accumulates average-pooled block outputs; same-width blocks add
    in place and every stage boundary concatenates to widen the channel
    dimension.  The classifier reads 'path' only.  NOTE(review): pooling
    windows 28/14/7 assume fixed input spatial sizes -- confirm.
    """
    out = self.conv1(x)
    out = self.bn1(out)
    out = F.relu(out)
    # Seed the side branch with the stem's pooled activations.
    path = F.avg_pool2d(out, 28)
    # --- stage 1 ---
    out = self.layer1_1(out)
    path += F.avg_pool2d(out, 28)
    out = self.layer1_2(out)
    path += F.avg_pool2d(out, 28)
    out = self.layer1_3(out)
    # path += F.avg_pool2d(out, 28)
    path = torch.cat((path, F.avg_pool2d(out, 28)), 1)
    out = F.max_pool2d(out, 2)
    # --- stage 2 ---
    out = self.layer2_1(out)
    path += F.avg_pool2d(out, 14)
    out = self.layer2_2(out)
    path += F.avg_pool2d(out, 14)
    out = self.layer2_3(out)
    # path += F.avg_pool2d(out, 14)
    path = torch.cat((path, F.avg_pool2d(out, 14)), 1)
    # path += F.avg_pool2d(out, 14)
    out = F.max_pool2d(out, 2)
    # --- stage 3 ---
    out = self.layer3_1(out)
    path += F.avg_pool2d(out, 7)
    out = self.layer3_2(out)
    path += F.avg_pool2d(out, 7)
    out = self.layer3_3(out)
    # path += F.avg_pool2d(out, 7)
    path = torch.cat((path, F.avg_pool2d(out, 7)), 1)
    # path += F.avg_pool2d(out, 7)
    # out = F.avg_pool2d(path, 4)
    # Classify from the accumulated side branch only.
    out = path.view(path.size(0), -1)
    out = self.linear(out)
    return out
|
GHData/DableUTeeF_HiResTorch/hardcodedmodels.py: 144-181
def forward(self, x):
    """Forward pass with an auxiliary globally-pooled 'path' branch (variant
    with no inter-block ReLUs and an additive -- not concatenated -- final
    stage-3 merge, with no final path ReLU).

    NOTE(review): pooling windows 28/14/7 assume fixed input spatial sizes
    -- confirm against the target dataset.
    """
    out = self.conv1(x)
    out = self.bn1(out)
    out = F.relu(out)
    # Seed the side branch with the stem's pooled activations.
    path = F.avg_pool2d(out, 28)
    # --- stage 1 ---
    out = self.layer1_1(out)
    path += F.avg_pool2d(out, 28)
    out = self.layer1_2(out)
    path += F.avg_pool2d(out, 28)
    out = self.layer1_3(out)
    # path += F.avg_pool2d(out, 28)
    path = torch.cat((path, F.avg_pool2d(out, 28)), 1)
    out = F.max_pool2d(out, 2)
    # --- stage 2 ---
    out = self.layer2_1(out)
    path += F.avg_pool2d(out, 14)
    out = self.layer2_2(out)
    path += F.avg_pool2d(out, 14)
    out = self.layer2_3(out)
    # path += F.avg_pool2d(out, 14)
    path = torch.cat((path, F.avg_pool2d(out, 14)), 1)
    # path += F.avg_pool2d(out, 14)
    out = F.max_pool2d(out, 2)
    # --- stage 3 ---
    out = self.layer3_1(out)
    path += F.avg_pool2d(out, 7)
    out = self.layer3_2(out)
    path += F.avg_pool2d(out, 7)
    out = self.layer3_3(out)
    # path = torch.cat((path, F.avg_pool2d(out, 7)), 1)
    path += F.avg_pool2d(out, 7)
    # out = F.avg_pool2d(path, 4)
    # Classify from the accumulated side branch only.
    out = path.view(path.size(0), -1)
    out = self.linear(out)
    return out
|
GHData/PacktPublishing_PyTorch-Deep-Learning-in-7-Days/4-4.py: 66-79
def forward(self, x):
    """Two conv/pool stages (their activations are cached on the instance
    for later inspection), then a three-layer FC head."""
    h = self.pool(F.relu(self.conv1(x)))
    self.after_conv1 = h     # cached so callers can visualize this activation
    h = self.pool(F.relu(self.conv2(h)))
    self.after_conv2 = h
    flat = h.flatten(start_dim=1)
    # this is a good place to see the size for debugging
    out = F.relu(self.fc1(flat))
    out = F.relu(self.fc2(out))
    return self.fc3(out)
|
GHData/Shrey-Viradiya_White-Box-Attack-PyTorch/train.py: 28-39
def forward(self, x):
    """Four conv/pool stages, flatten to 128*5*5, then three FC layers."""
    for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
        x = self.pool(F.relu(conv(x)))
    # fc1 expects a fixed 128*5*5 flattened feature size.
    x = x.reshape(-1, 128 * 5 * 5)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    return self.fc3(x)
|
GHData/bin-go2011_PyTorch-Deep-Learning-in-7-Days/4-4.py: 66-79
def forward(self, x):
    """CNN forward that caches intermediate activations (after_conv1/2) on
    the instance, then runs a three-layer FC classifier head."""
    stage1 = self.pool(F.relu(self.conv1(x)))
    self.after_conv1 = stage1
    stage2 = self.pool(F.relu(self.conv2(stage1)))
    self.after_conv2 = stage2
    # print(x.shape) is a good place to check sizes when debugging.
    flat = stage2.flatten(start_dim=1)
    hidden = F.relu(self.fc2(F.relu(self.fc1(flat))))
    return self.fc3(hidden)
|
GHData/Hung-Ta-Chen_Implement-CNN-for-Image-Recognition-with-PyTorch/cnn_cifar.py: 38-66
def forward(self, x):
    """CIFAR classifier forward pass.

    conv+pool x2, dropout, flatten, two hidden FC layers with dropout after
    the second, ReLU on fc3, then log-softmax.  Fix: removed the large
    amount of commented-out debug code (shape prints and unrolled steps).
    """
    x = self.pool(F.relu(self.conv1(x)))
    x = self.pool(F.relu(self.conv2(x)))
    x = self.dropout1(x)
    x = torch.flatten(x, 1)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    x = self.dropout2(x)
    # NOTE(review): ReLU before log-softmax zeroes negative logits; unusual,
    # but kept to preserve the original behavior.
    x = F.relu(self.fc3(x))
    output = F.log_softmax(x, dim=1)
    return output
|
GHData/kevincao91_PyTorch_Demo/my_class.py: 64-76
def forward(self, x):
    """Four conv/pool blocks, flatten to 64*2*2, FC head with dropout
    after fc1."""
    for conv, pool in ((self.conv1, self.pool1), (self.conv2, self.pool2),
                       (self.conv3, self.pool3), (self.conv4, self.pool4)):
        x = pool(F.relu(conv(x)))
    x = x.view(-1, 64 * 2 * 2)
    x = self.fc1_dropout(F.relu(self.fc1(x)))
    x = F.relu(self.fc2(x))
    return self.fc3(x)
# 定义权值初始化
|
GHData/maggeek_Transfer-Learning-PyTorch/model.py: 28-38
def forward(self, x):
    """Six conv+BN+LeakyReLU blocks, flatten, sigmoid FC output.

    The view(N, C) step assumes the final feature map is (N, C, 1, 1).
    """
    h = x
    for conv, bn in ((self.conv1, self.bn1), (self.conv2, self.bn2),
                     (self.conv3, self.bn3), (self.conv4, self.bn4),
                     (self.conv5, self.bn5), (self.conv6, self.bn6)):
        h = self.lrelu(bn(conv(h)))
    flat = h.view(h.shape[0], h.shape[1])
    return self.sigmoid(self.fc(flat))
|
GHData/MayankSingal_PyTorch-Pixel-Level-Domain-Transfer/model.py: 47-64
def forward(self, X):
    """Encoder (conv+BN+LeakyReLU; the first conv has no BN) followed by a
    decoder (deconv+BN+ReLU) with a tanh on the final deconv."""
    h = self.leakyrelu(self.conv1(X))
    h = self.leakyrelu(self.bn2(self.conv2(h)))
    h = self.leakyrelu(self.bn3(self.conv3(h)))
    h = self.leakyrelu(self.bn4(self.conv4(h)))
    h = self.relu(self.bn_deconv1(self.deconv1(h)))
    h = self.relu(self.bn_deconv2(self.deconv2(h)))
    h = self.relu(self.bn_deconv3(self.deconv3(h)))
    return self.tanh(self.deconv4(h))
|
GHData/vprayagala_PyTorch/Classifier_CIFAR_Images.py: 131-150
def forward(self, x):
    """Three conv/pool stages, flatten, dropout-wrapped FC head (fc2 is a
    linear output layer)."""
    # Convolution + max-pooling stages.
    h = self.pool(F.relu(self.conv1(x)))
    h = self.pool(F.relu(self.conv2(h)))
    h = self.pool(F.relu(self.conv3(h)))
    # Flatten, with dropout before and after the first hidden layer.
    h = self.dropout(h.view(-1, 64 * 4 * 4))
    h = self.dropout(F.relu(self.fc1(h)))
    return self.fc2(h)
#%%
# create a complete CNN
|
GHData/kevincao91_PyTorch_Demo/my_class.py: 193-205
def forward(self, x):
    """Four conv/pool blocks, flatten to 256*2*2, FC head with dropout
    after fc1."""
    for conv, pool in ((self.conv1, self.pool1), (self.conv2, self.pool2),
                       (self.conv3, self.pool3), (self.conv4, self.pool4)):
        x = pool(F.relu(conv(x)))
    x = x.view(-1, 256 * 2 * 2)
    x = self.fc1_dropout(F.relu(self.fc1(x)))
    x = F.relu(self.fc2(x))
    return self.fc3(x)
# 定义权值初始化
|
GHData/kevincao91_PyTorch_Demo/my_class.py: 150-162
def forward(self, x):
    """Four conv/pool blocks, flatten to 128*2*2, FC head with dropout
    after fc1."""
    for conv, pool in ((self.conv1, self.pool1), (self.conv2, self.pool2),
                       (self.conv3, self.pool3), (self.conv4, self.pool4)):
        x = pool(F.relu(conv(x)))
    x = x.view(-1, 128 * 2 * 2)
    x = self.fc1_dropout(F.relu(self.fc1(x)))
    x = F.relu(self.fc2(x))
    return self.fc3(x)
# 定义权值初始化
|
GHData/YZJ6GitHub_PyTorch_Learing/cifar_Classification.py: 83-92
def forward(self, x):
    """Three conv/pool stages, flatten, then a dropout-regularised head.

    Note: the output layer is ReLU-activated as well (fc2 -> ReLU).
    """
    for conv in (self.conv1, self.conv2, self.conv3):
        x = self.pool(F.relu(conv(x)))
    # Flatten to a 64*4*4 vector, dropout before each linear layer.
    x = self.dropout(x.view(-1, 64 * 4 * 4))
    x = self.dropout(F.relu(self.fc1(x)))
    return F.relu(self.fc2(x))
|
GHData/kevincao91_PyTorch_Demo/my_class.py: 107-119
def forward(self, x):
    """Four conv/pool stages, flatten to a 96*2*2 vector, 3-layer classifier."""
    # Feature extractor: conv -> ReLU -> per-stage pooling.
    layer_pairs = [(self.conv1, self.pool1), (self.conv2, self.pool2),
                   (self.conv3, self.pool3), (self.conv4, self.pool4)]
    out = x
    for conv, pool in layer_pairs:
        out = pool(F.relu(conv(out)))
    out = out.view(-1, 96 * 2 * 2)  # flatten per sample
    out = self.fc1_dropout(F.relu(self.fc1(out)))
    out = F.relu(self.fc2(out))
    return self.fc3(out)
# 定义权值初始化
|
GHData/ChunjieShan_PyTorch-baseline/net.py: 29-44
def forward(self, x):
    """Seven conv+BN+ReLU stages, flatten, then fc1 (ReLU) and fc2 (sigmoid).

    Returns the per-sample sigmoid activations of fc2.
    """
    stages = ((self.conv1, self.bn1), (self.conv2, self.bn2),
              (self.conv3, self.bn3), (self.conv4, self.bn4),
              (self.conv5, self.bn5), (self.conv6, self.bn6),
              (self.conv7, self.bn7))
    for conv, bn in stages:
        x = F.relu(bn(conv(x)))
    # Flatten the 128x5x5 feature map to one vector per sample.
    x = x.view(-1, 128 * 5 * 5)
    x = F.relu(self.fc1(x))
    # F.sigmoid is deprecated in PyTorch; Tensor.sigmoid() is the
    # supported equivalent with identical numerics.
    return self.fc2(x).sigmoid()
|
GHData/koptelovmax_PyTorch-DL/alexnet.py: 30-44
def forward(self, x):
    """AlexNet-style pass: five conv layers (pooling after 1, 2 and 5),
    flatten to 256*5*5, then a dropout-regularised three-layer classifier."""
    x = self.pool1(F.relu(self.conv1(x)))
    x = self.pool2(F.relu(self.conv2(x)))
    # conv3 and conv4 have no pooling between them.
    x = F.relu(self.conv4(F.relu(self.conv3(x))))
    x = self.pool3(F.relu(self.conv5(x)))
    flat = x.view(-1, 256 * 5 * 5)
    # Dropout before each hidden linear layer to prevent overfitting.
    hidden = F.relu(self.fc1(self.drop(flat)))
    hidden = F.relu(self.fc2(self.drop(hidden)))
    return self.fc3(hidden)
# Based on the reference class above define the one adapted to the CIFAR10 dataset:
|
GHData/koptelovmax_PyTorch-DL/alexnet.py: 61-76
def forward(self, x):
    """CIFAR-10 variant of the AlexNet forward pass: same layer order as the
    reference model, but flattening a 256x2x2 feature map."""
    # Stages 1 and 2: conv -> ReLU -> max-pool.
    for conv, pool in ((self.conv1, self.pool1), (self.conv2, self.pool2)):
        x = pool(F.relu(conv(x)))
    # Stages 3 and 4: conv -> ReLU only (no pooling).
    x = F.relu(self.conv3(x))
    x = F.relu(self.conv4(x))
    # Stage 5: conv -> ReLU -> max-pool, then flatten.
    x = self.pool3(F.relu(self.conv5(x)))
    x = x.view(-1, 256 * 2 * 2)
    # Classifier head with dropout before each hidden linear layer.
    x = F.relu(self.fc1(self.drop(x)))
    x = F.relu(self.fc2(self.drop(x)))
    return self.fc3(x)
#%%
# Define a model:
|
GHData/otenim_GLCIC-PyTorch/models.py: 195-208
def forward(self, x):
    """Five conv stages (activation then batch-norm), followed by an
    architecture-specific head selected by self.arc."""
    stages = ((self.conv1, self.act1, self.bn1),
              (self.conv2, self.act2, self.bn2),
              (self.conv3, self.act3, self.bn3),
              (self.conv4, self.act4, self.bn4),
              (self.conv5, self.act5, self.bn5))
    for conv, act, bn in stages:
        x = bn(act(conv(x)))
    if self.arc == 'celeba':
        # CelebA head: flatten, then a single activated linear layer.
        x = self.act6(self.linear6(self.flatten6(x)))
    elif self.arc == 'places2':
        # Places2 head: one extra conv stage, then flatten + linear.
        x = self.bn6(self.act6(self.conv6(x)))
        x = self.act7(self.linear7(self.flatten7(x)))
    return x
|
|